rdopt.c 74 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385
  1. /*
  2. * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
  10. #include <stdio.h>
  11. #include <math.h>
  12. #include <limits.h>
  13. #include <assert.h>
  14. #include "vpx_config.h"
  15. #include "vp8_rtcd.h"
  16. #include "./vpx_dsp_rtcd.h"
  17. #include "tokenize.h"
  18. #include "treewriter.h"
  19. #include "onyx_int.h"
  20. #include "modecosts.h"
  21. #include "encodeintra.h"
  22. #include "pickinter.h"
  23. #include "vp8/common/entropymode.h"
  24. #include "vp8/common/reconinter.h"
  25. #include "vp8/common/reconintra.h"
  26. #include "vp8/common/reconintra4x4.h"
  27. #include "vp8/common/findnearmv.h"
  28. #include "vp8/common/quant_common.h"
  29. #include "encodemb.h"
  30. #include "vp8/encoder/quantize.h"
  31. #include "vpx_dsp/variance.h"
  32. #include "mcomp.h"
  33. #include "rdopt.h"
  34. #include "vpx_mem/vpx_mem.h"
  35. #include "vp8/common/systemdependent.h"
  36. #if CONFIG_TEMPORAL_DENOISING
  37. #include "denoising.h"
  38. #endif
extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);

/* Maximum of two values (evaluates its arguments more than once). */
#define MAXF(a, b) (((a) > (b)) ? (a) : (b))

/* Rate and distortion tallies accumulated while evaluating one mode. */
typedef struct rate_distortion_struct {
  int rate2;
  int rate_y;
  int rate_uv;
  int distortion2;
  int distortion_uv;
} RATE_DISTORTION;

/* Snapshot of the best candidate found so far during the mode search. */
typedef struct best_mode_struct {
  int yrd;
  int rd;
  int intra_rd;
  MB_MODE_INFO mbmode;
  union b_mode_info bmodes[16];
  PARTITION_INFO partition;
} BEST_MODE;
/* Per-speed thresholds (percent of budget) used by vp8_auto_select_speed()
 * when deciding whether to step the speed setting back down. */
static const int auto_speed_thresh[17] = { 1000, 200, 150, 130, 150, 125,
                                           120,  115, 115, 115, 115, 115,
                                           115,  115, 115, 115, 105 };

/* Order in which prediction modes are tried by the RD mode search; the
 * same mode can appear several times, once per candidate reference frame
 * (see vp8_ref_frame_order below). */
const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES] = {
  ZEROMV,    DC_PRED,

  NEARESTMV, NEARMV,

  ZEROMV,    NEARESTMV,

  ZEROMV,    NEARESTMV,

  NEARMV,    NEARMV,

  V_PRED,    H_PRED,    TM_PRED,

  NEWMV,     NEWMV,     NEWMV,

  SPLITMV,   SPLITMV,   SPLITMV,

  B_PRED,
};

/* This table determines the search order in reference frame priority order,
 * which may not necessarily match INTRA,LAST,GOLDEN,ARF
 */
const int vp8_ref_frame_order[MAX_MODES] = {
  1, 0,

  1, 1,

  2, 2,

  3, 3,

  2, 3,

  0, 0, 0,

  1, 2, 3,

  1, 2, 3,

  0,
};
  84. static void fill_token_costs(int c[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
  85. [MAX_ENTROPY_TOKENS],
  86. const vp8_prob p[BLOCK_TYPES][COEF_BANDS]
  87. [PREV_COEF_CONTEXTS]
  88. [ENTROPY_NODES]) {
  89. int i, j, k;
  90. for (i = 0; i < BLOCK_TYPES; ++i) {
  91. for (j = 0; j < COEF_BANDS; ++j) {
  92. for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
  93. /* check for pt=0 and band > 1 if block type 0
  94. * and 0 if blocktype 1
  95. */
  96. if (k == 0 && j > (i == 0)) {
  97. vp8_cost_tokens2(c[i][j][k], p[i][j][k], vp8_coef_tree, 2);
  98. } else {
  99. vp8_cost_tokens(c[i][j][k], p[i][j][k], vp8_coef_tree);
  100. }
  101. }
  102. }
  103. }
  104. }
/* Two-pass RDMULT boost factors indexed by twopass.next_iiratio; applied
 * in vp8_initialize_rd_consts() as RDMULT += (RDMULT * factor) >> 4. */
static const int rd_iifactor[32] = { 4, 4, 3, 2, 1, 0, 0, 0, 0, 0, 0,
                                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

/* values are now correlated to quantizer */

/* Source of cpi->mb.sadperbit16, indexed by quantizer index (see
 * vp8cx_initialize_me_consts). */
static const int sad_per_bit16lut[QINDEX_RANGE] = {
  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  3,  3,  3,
  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  4,  4,  4,  4,  4,  4,  4,  4,
  4,  4,  4,  4,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  6,  6,  6,
  6,  6,  6,  6,  6,  6,  6,  6,  6,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,
  7,  7,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  9,  9,  9,  9,  9,
  9,  9,  9,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11,
  11, 11, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14
};

/* Source of cpi->mb.sadperbit4, indexed by quantizer index. */
static const int sad_per_bit4lut[QINDEX_RANGE] = {
  2,  2,  2,  2,  2,  2,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,
  3,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  5,  5,  5,  5,  5,  5,  6,  6,
  6,  6,  6,  6,  6,  6,  6,  6,  6,  6,  7,  7,  7,  7,  7,  7,  7,  7,  7,
  7,  7,  7,  7,  8,  8,  8,  8,  8,  9,  9,  9,  9,  9,  9,  10, 10, 10, 10,
  10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12,
  12, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16,
  16, 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 20, 20, 20,
};
  127. void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex) {
  128. cpi->mb.sadperbit16 = sad_per_bit16lut[QIndex];
  129. cpi->mb.sadperbit4 = sad_per_bit4lut[QIndex];
  130. }
/* Set up the frame-level rate-distortion constants: RDMULT/RDDIV,
 * errorperbit, the per-mode RD thresholds and the token/mode cost
 * tables.  Qvalue is the frame quantizer value. */
void vp8_initialize_rd_consts(VP8_COMP *cpi, MACROBLOCK *x, int Qvalue) {
  int q;
  int i;
  /* Q contribution to the RD multiplier is capped at 160. */
  double capped_q = (Qvalue < 160) ? (double)Qvalue : 160.0;
  double rdconst = 2.80;

  vp8_clear_system_state();

  /* Further tests required to see if optimum is different
   * for key frames, golden frames and arf frames.
   */
  cpi->RDMULT = (int)(rdconst * (capped_q * capped_q));

  /* Extend rate multiplier along side quantizer zbin increases */
  if (cpi->mb.zbin_over_quant > 0) {
    double oq_factor;
    double modq;

    /* Experimental code using the same basic equation as used for Q above
     * The units of cpi->mb.zbin_over_quant are 1/128 of Q bin size
     */
    oq_factor = 1.0 + ((double)0.0015625 * cpi->mb.zbin_over_quant);
    modq = (int)((double)capped_q * oq_factor);
    cpi->RDMULT = (int)(rdconst * (modq * modq));
  }

  /* Two-pass, non-key frames: boost RDMULT by a factor selected from
   * rd_iifactor[] using the projected intra/inter ratio (clamped to 31). */
  if (cpi->pass == 2 && (cpi->common.frame_type != KEY_FRAME)) {
    if (cpi->twopass.next_iiratio > 31) {
      cpi->RDMULT += (cpi->RDMULT * rd_iifactor[31]) >> 4;
    } else {
      cpi->RDMULT +=
          (cpi->RDMULT * rd_iifactor[cpi->twopass.next_iiratio]) >> 4;
    }
  }

  /* errorperbit is kept at a minimum of 1. */
  cpi->mb.errorperbit = (cpi->RDMULT / 110);
  cpi->mb.errorperbit += (cpi->mb.errorperbit == 0);

  vp8_set_speed_features(cpi);

  for (i = 0; i < MAX_MODES; ++i) {
    x->mode_test_hit_counts[i] = 0;
  }

  /* Mode thresholds scale with Qvalue^1.25, floored at 8. */
  q = (int)pow(Qvalue, 1.25);

  if (q < 8) q = 8;

  if (cpi->RDMULT > 1000) {
    /* Large multiplier: rescale RDMULT/RDDIV by 100 to keep the RDCOST
     * arithmetic within integer range. */
    cpi->RDDIV = 1;
    cpi->RDMULT /= 100;

    for (i = 0; i < MAX_MODES; ++i) {
      if (cpi->sf.thresh_mult[i] < INT_MAX) {
        x->rd_threshes[i] = cpi->sf.thresh_mult[i] * q / 100;
      } else {
        x->rd_threshes[i] = INT_MAX;
      }

      cpi->rd_baseline_thresh[i] = x->rd_threshes[i];
    }
  } else {
    cpi->RDDIV = 100;

    for (i = 0; i < MAX_MODES; ++i) {
      /* Guard the thresh_mult[i] * q product against overflow. */
      if (cpi->sf.thresh_mult[i] < (INT_MAX / q)) {
        x->rd_threshes[i] = cpi->sf.thresh_mult[i] * q;
      } else {
        x->rd_threshes[i] = INT_MAX;
      }

      cpi->rd_baseline_thresh[i] = x->rd_threshes[i];
    }
  }

  {
    /* build token cost array for the type of frame we have now */
    FRAME_CONTEXT *l = &cpi->lfc_n;
    if (cpi->common.refresh_alt_ref_frame) {
      l = &cpi->lfc_a;
    } else if (cpi->common.refresh_golden_frame) {
      l = &cpi->lfc_g;
    }

    fill_token_costs(cpi->mb.token_costs,
                     (const vp8_prob(*)[8][3][11])l->coef_probs);
    /*
    fill_token_costs(
        cpi->mb.token_costs,
        (const vp8_prob( *)[8][3][11]) cpi->common.fc.coef_probs);
    */

    /* TODO make these mode costs depend on last,alt or gold too.  (jbb) */
    vp8_init_mode_costs(cpi);
  }
}
/* Adapt cpi->Speed from measured mode-pick / encode times relative to the
 * per-frame time budget implied by the frame rate and cpu_used setting. */
void vp8_auto_select_speed(VP8_COMP *cpi) {
  /* Per-frame budget: 1000000/framerate (microseconds despite the name —
   * NOTE(review): confirm units match avg_encode_time), scaled down as
   * cpu_used rises toward 16. */
  int milliseconds_for_compress = (int)(1000000 / cpi->framerate);

  milliseconds_for_compress =
      milliseconds_for_compress * (16 - cpi->oxcf.cpu_used) / 16;

#if 0
  if (0)
  {
    FILE *f;

    f = fopen("speed.stt", "a");
    fprintf(f, " %8ld %10ld %10ld %10ld\n",
            cpi->common.current_video_frame, cpi->Speed, milliseconds_for_compress, cpi->avg_pick_mode_time);
    fclose(f);
  }
#endif

  if (cpi->avg_pick_mode_time < milliseconds_for_compress &&
      (cpi->avg_encode_time - cpi->avg_pick_mode_time) <
          milliseconds_for_compress) {
    if (cpi->avg_pick_mode_time == 0) {
      /* No timing data yet: start at the fastest real-time speed. */
      cpi->Speed = 4;
    } else {
      /* Within 5% of the budget: speed up two steps (capped at 16). */
      if (milliseconds_for_compress * 100 < cpi->avg_encode_time * 95) {
        cpi->Speed += 2;
        cpi->avg_pick_mode_time = 0;
        cpi->avg_encode_time = 0;

        if (cpi->Speed > 16) {
          cpi->Speed = 16;
        }
      }

      /* Comfortably under the per-speed threshold: slow down one step
       * for better quality. */
      if (milliseconds_for_compress * 100 >
          cpi->avg_encode_time * auto_speed_thresh[cpi->Speed]) {
        cpi->Speed -= 1;
        cpi->avg_pick_mode_time = 0;
        cpi->avg_encode_time = 0;

        /* In real-time mode, cpi->speed is in [4, 16]. */
        if (cpi->Speed < 4) {
          cpi->Speed = 4;
        }
      }
    }
  } else {
    /* Over budget: jump four speed steps at once (capped at 16) and
     * restart the timing averages. */
    cpi->Speed += 4;

    if (cpi->Speed > 16) cpi->Speed = 16;

    cpi->avg_pick_mode_time = 0;
    cpi->avg_encode_time = 0;
  }
}
  255. int vp8_block_error_c(short *coeff, short *dqcoeff) {
  256. int i;
  257. int error = 0;
  258. for (i = 0; i < 16; ++i) {
  259. int this_diff = coeff[i] - dqcoeff[i];
  260. error += this_diff * this_diff;
  261. }
  262. return error;
  263. }
  264. int vp8_mbblock_error_c(MACROBLOCK *mb, int dc) {
  265. BLOCK *be;
  266. BLOCKD *bd;
  267. int i, j;
  268. int berror, error = 0;
  269. for (i = 0; i < 16; ++i) {
  270. be = &mb->block[i];
  271. bd = &mb->e_mbd.block[i];
  272. berror = 0;
  273. for (j = dc; j < 16; ++j) {
  274. int this_diff = be->coeff[j] - bd->dqcoeff[j];
  275. berror += this_diff * this_diff;
  276. }
  277. error += berror;
  278. }
  279. return error;
  280. }
  281. int vp8_mbuverror_c(MACROBLOCK *mb) {
  282. BLOCK *be;
  283. BLOCKD *bd;
  284. int i;
  285. int error = 0;
  286. for (i = 16; i < 24; ++i) {
  287. be = &mb->block[i];
  288. bd = &mb->e_mbd.block[i];
  289. error += vp8_block_error_c(be->coeff, bd->dqcoeff);
  290. }
  291. return error;
  292. }
/* Sum of squared errors between the source chroma and the inter
 * prediction at the MB's current motion vector.  Returns SSE(U)+SSE(V). */
int VP8_UVSSE(MACROBLOCK *x) {
  unsigned char *uptr, *vptr;
  unsigned char *upred_ptr = (*(x->block[16].base_src) + x->block[16].src);
  unsigned char *vpred_ptr = (*(x->block[20].base_src) + x->block[20].src);
  int uv_stride = x->block[16].src_stride;

  unsigned int sse1 = 0;
  unsigned int sse2 = 0;
  int mv_row = x->e_mbd.mode_info_context->mbmi.mv.as_mv.row;
  int mv_col = x->e_mbd.mode_info_context->mbmi.mv.as_mv.col;
  int offset;
  int pre_stride = x->e_mbd.pre.uv_stride;

  /* Derive the chroma MV: halve the luma MV, rounding away from zero. */
  if (mv_row < 0) {
    mv_row -= 1;
  } else {
    mv_row += 1;
  }

  if (mv_col < 0) {
    mv_col -= 1;
  } else {
    mv_col += 1;
  }

  mv_row /= 2;
  mv_col /= 2;

  /* Whole-pel offset into the reference chroma planes; the low three MV
   * bits are the eighth-pel sub-position. */
  offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
  uptr = x->e_mbd.pre.u_buffer + offset;
  vptr = x->e_mbd.pre.v_buffer + offset;

  if ((mv_row | mv_col) & 7) {
    /* Sub-pel position: SSE from the sub-pixel variance kernels. */
    vpx_sub_pixel_variance8x8(uptr, pre_stride, mv_col & 7, mv_row & 7,
                              upred_ptr, uv_stride, &sse2);
    vpx_sub_pixel_variance8x8(vptr, pre_stride, mv_col & 7, mv_row & 7,
                              vpred_ptr, uv_stride, &sse1);
    sse2 += sse1;
  } else {
    /* Full-pel position: plain variance path. */
    vpx_variance8x8(uptr, pre_stride, upred_ptr, uv_stride, &sse2);
    vpx_variance8x8(vptr, pre_stride, vpred_ptr, uv_stride, &sse1);
    sse2 += sse1;
  }
  return sse2;
}
/* Bit cost of one block's quantized coefficients given the above (*a)
 * and left (*l) entropy contexts.  Both contexts are updated to record
 * whether this block produced any non-zero coefficients. */
static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type, ENTROPY_CONTEXT *a,
                       ENTROPY_CONTEXT *l) {
  int c = !type; /* start at coef 0, unless Y with Y2 */
  int eob = (int)(*b->eob);
  int pt; /* surrounding block/prev coef predictor */
  int cost = 0;
  short *qcoeff_ptr = b->qcoeff;

  /* Initial token context combined from the neighboring blocks. */
  VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);

  assert(eob <= 16);
  for (; c < eob; ++c) {
    const int v = qcoeff_ptr[vp8_default_zig_zag1d[c]];
    const int t = vp8_dct_value_tokens_ptr[v].Token;
    /* Context-dependent token cost plus the extra-bit cost carried by
     * this coefficient value. */
    cost += mb->token_costs[type][vp8_coef_bands[c]][pt][t];
    cost += vp8_dct_value_cost_ptr[v];
    pt = vp8_prev_token_class[t];
  }

  /* An end-of-block token is coded unless all 16 positions were used. */
  if (c < 16) {
    cost += mb->token_costs[type][vp8_coef_bands[c]][pt][DCT_EOB_TOKEN];
  }

  pt = (c != !type); /* is eob first coefficient; */

  *a = *l = pt;

  return cost;
}
  355. static int vp8_rdcost_mby(MACROBLOCK *mb) {
  356. int cost = 0;
  357. int b;
  358. MACROBLOCKD *x = &mb->e_mbd;
  359. ENTROPY_CONTEXT_PLANES t_above, t_left;
  360. ENTROPY_CONTEXT *ta;
  361. ENTROPY_CONTEXT *tl;
  362. memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
  363. memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
  364. ta = (ENTROPY_CONTEXT *)&t_above;
  365. tl = (ENTROPY_CONTEXT *)&t_left;
  366. for (b = 0; b < 16; ++b) {
  367. cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_NO_DC,
  368. ta + vp8_block2above[b], tl + vp8_block2left[b]);
  369. }
  370. cost += cost_coeffs(mb, x->block + 24, PLANE_TYPE_Y2,
  371. ta + vp8_block2above[24], tl + vp8_block2left[24]);
  372. return cost;
  373. }
/* Luma rate and distortion for the current macroblock: residual,
 * forward transforms, quantization, then error and token costing. */
static void macro_block_yrd(MACROBLOCK *mb, int *Rate, int *Distortion) {
  int b;
  MACROBLOCKD *const x = &mb->e_mbd;
  BLOCK *const mb_y2 = mb->block + 24;
  BLOCKD *const x_y2 = x->block + 24;
  short *Y2DCPtr = mb_y2->src_diff;
  BLOCK *beptr;
  int d;

  vp8_subtract_mby(mb->src_diff, *(mb->block[0].base_src),
                   mb->block[0].src_stride, mb->e_mbd.predictor, 16);

  /* Fdct and building the 2nd order block */
  /* Each 8x4 fdct covers a pair of 4x4 blocks; their DC terms
   * (coeff[0] and coeff[16]) are collected into the Y2 source buffer. */
  for (beptr = mb->block; beptr < mb->block + 16; beptr += 2) {
    mb->short_fdct8x4(beptr->src_diff, beptr->coeff, 32);
    *Y2DCPtr++ = beptr->coeff[0];
    *Y2DCPtr++ = beptr->coeff[16];
  }

  /* 2nd order fdct */
  mb->short_walsh4x4(mb_y2->src_diff, mb_y2->coeff, 8);

  /* Quantization */
  for (b = 0; b < 16; ++b) {
    mb->quantize_b(&mb->block[b], &mb->e_mbd.block[b]);
  }

  /* DC predication and Quantization of 2nd Order block */
  mb->quantize_b(mb_y2, x_y2);

  /* Distortion */
  /* The dc=1 argument excludes coefficient 0 of each Y block (its DC is
   * accounted for by the Y2 block); the <<2 / >>4 scaling is inherited
   * transform normalization. */
  d = vp8_mbblock_error(mb, 1) << 2;
  d += vp8_block_error(mb_y2->coeff, x_y2->dqcoeff);

  *Distortion = (d >> 4);

  /* rate */
  *Rate = vp8_rdcost_mby(mb);
}
  405. static void copy_predictor(unsigned char *dst, const unsigned char *predictor) {
  406. const unsigned int *p = (const unsigned int *)predictor;
  407. unsigned int *d = (unsigned int *)dst;
  408. d[0] = p[0];
  409. d[4] = p[4];
  410. d[8] = p[8];
  411. d[12] = p[12];
  412. }
/* RD-search all intra 4x4 prediction modes for one block.  On return,
 * *best_mode/*bestrate/*bestratey/*bestdistortion describe the winner,
 * *a/*l hold the entropy contexts it produced, and the reconstructed
 * block has been written back to the destination frame buffer.
 * Returns the best RD cost. */
static int rd_pick_intra4x4block(MACROBLOCK *x, BLOCK *be, BLOCKD *b,
                                 B_PREDICTION_MODE *best_mode,
                                 const int *bmode_costs, ENTROPY_CONTEXT *a,
                                 ENTROPY_CONTEXT *l,

                                 int *bestrate, int *bestratey,
                                 int *bestdistortion) {
  B_PREDICTION_MODE mode;
  int best_rd = INT_MAX;
  int rate = 0;
  int distortion;

  ENTROPY_CONTEXT ta = *a, tempa = *a;
  ENTROPY_CONTEXT tl = *l, templ = *l;
  /*
   * The predictor buffer is a 2d buffer with a stride of 16.  Create
   * a temp buffer that meets the stride requirements, but we are only
   * interested in the left 4x4 block
   * */
  DECLARE_ALIGNED(16, unsigned char, best_predictor[16 * 4]);
  DECLARE_ALIGNED(16, short, best_dqcoeff[16]);
  int dst_stride = x->e_mbd.dst.y_stride;
  unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset;

  unsigned char *Above = dst - dst_stride;
  unsigned char *yleft = dst - 1;
  unsigned char top_left = Above[-1];

  for (mode = B_DC_PRED; mode <= B_HU_PRED; ++mode) {
    int this_rd;
    int ratey;

    rate = bmode_costs[mode];

    /* Predict, residual, transform, quantize for this candidate mode. */
    vp8_intra4x4_predict(Above, yleft, dst_stride, mode, b->predictor, 16,
                         top_left);
    vp8_subtract_b(be, b, 16);
    x->short_fdct4x4(be->src_diff, be->coeff, 32);
    x->quantize_b(be, b);

    /* Cost coefficients on copies of the entropy contexts so losers
     * don't pollute them. */
    tempa = ta;
    templ = tl;

    ratey = cost_coeffs(x, b, PLANE_TYPE_Y_WITH_DC, &tempa, &templ);
    rate += ratey;
    distortion = vp8_block_error(be->coeff, b->dqcoeff) >> 2;

    this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

    if (this_rd < best_rd) {
      *bestrate = rate;
      *bestratey = ratey;
      *bestdistortion = distortion;
      best_rd = this_rd;
      *best_mode = mode;
      *a = tempa;
      *l = templ;
      /* Keep the winner's predictor and dequantized coefficients so the
       * block can be reconstructed without re-running prediction. */
      copy_predictor(best_predictor, b->predictor);
      memcpy(best_dqcoeff, b->dqcoeff, 32);
    }
  }
  b->bmi.as_mode = *best_mode;

  /* Reconstruct the block with the winning mode's data. */
  vp8_short_idct4x4llm(best_dqcoeff, best_predictor, 16, dst, dst_stride);

  return best_rd;
}
/* RD-search intra 4x4 (B_PRED) modes for all 16 luma blocks of the MB.
 * Aborts early (returning INT_MAX) once the accumulated RD cost exceeds
 * best_rd; otherwise fills *Rate/*rate_y/*Distortion and returns the
 * total RD cost. */
static int rd_pick_intra4x4mby_modes(MACROBLOCK *mb, int *Rate, int *rate_y,
                                     int *Distortion, int best_rd) {
  MACROBLOCKD *const xd = &mb->e_mbd;
  int i;
  /* Start with the cost of signalling B_PRED at the MB level. */
  int cost = mb->mbmode_cost[xd->frame_type][B_PRED];
  int distortion = 0;
  int tot_rate_y = 0;
  int64_t total_rd = 0;
  ENTROPY_CONTEXT_PLANES t_above, t_left;
  ENTROPY_CONTEXT *ta;
  ENTROPY_CONTEXT *tl;
  const int *bmode_costs;

  /* Work on copies of the entropy contexts. */
  memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
  memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));

  ta = (ENTROPY_CONTEXT *)&t_above;
  tl = (ENTROPY_CONTEXT *)&t_left;

  intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16);

  bmode_costs = mb->inter_bmode_costs;

  for (i = 0; i < 16; ++i) {
    MODE_INFO *const mic = xd->mode_info_context;
    const int mis = xd->mode_info_stride;

    B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
    int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry),
        UNINITIALIZED_IS_SAFE(d);

    /* Key frames condition the sub-mode cost on the above/left modes. */
    if (mb->e_mbd.frame_type == KEY_FRAME) {
      const B_PREDICTION_MODE A = above_block_mode(mic, i, mis);
      const B_PREDICTION_MODE L = left_block_mode(mic, i);

      bmode_costs = mb->bmode_costs[A][L];
    }

    total_rd += rd_pick_intra4x4block(
        mb, mb->block + i, xd->block + i, &best_mode, bmode_costs,
        ta + vp8_block2above[i], tl + vp8_block2left[i], &r, &ry, &d);

    cost += r;
    distortion += d;
    tot_rate_y += ry;

    mic->bmi[i].as_mode = best_mode;

    /* Early out once this candidate can no longer beat best_rd. */
    if (total_rd >= (int64_t)best_rd) break;
  }

  if (total_rd >= (int64_t)best_rd) return INT_MAX;

  *Rate = cost;
  *rate_y = tot_rate_y;
  *Distortion = distortion;

  return RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
}
/* RD-search the four whole-MB intra luma modes (DC/V/H/TM); stores the
 * winner in the MB's mode info, fills *Rate/*rate_y/*Distortion and
 * returns the best RD cost. */
static int rd_pick_intra16x16mby_mode(MACROBLOCK *x, int *Rate, int *rate_y,
                                      int *Distortion) {
  MB_PREDICTION_MODE mode;
  MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
  int rate, ratey;
  int distortion;
  int best_rd = INT_MAX;
  int this_rd;
  MACROBLOCKD *xd = &x->e_mbd;

  /* Y Search for 16x16 intra prediction mode */
  for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
    xd->mode_info_context->mbmi.mode = mode;

    vp8_build_intra_predictors_mby_s(xd, xd->dst.y_buffer - xd->dst.y_stride,
                                     xd->dst.y_buffer - 1, xd->dst.y_stride,
                                     xd->predictor, 16);

    macro_block_yrd(x, &ratey, &distortion);
    /* Token rate plus the cost of signalling the mode itself. */
    rate = ratey +
           x->mbmode_cost[xd->frame_type][xd->mode_info_context->mbmi.mode];

    this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

    if (this_rd < best_rd) {
      mode_selected = mode;
      best_rd = this_rd;
      *Rate = rate;
      *rate_y = ratey;
      *Distortion = distortion;
    }
  }

  xd->mode_info_context->mbmi.mode = mode_selected;
  return best_rd;
}
  542. static int rd_cost_mbuv(MACROBLOCK *mb) {
  543. int b;
  544. int cost = 0;
  545. MACROBLOCKD *x = &mb->e_mbd;
  546. ENTROPY_CONTEXT_PLANES t_above, t_left;
  547. ENTROPY_CONTEXT *ta;
  548. ENTROPY_CONTEXT *tl;
  549. memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
  550. memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
  551. ta = (ENTROPY_CONTEXT *)&t_above;
  552. tl = (ENTROPY_CONTEXT *)&t_left;
  553. for (b = 16; b < 24; ++b) {
  554. cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_UV,
  555. ta + vp8_block2above[b], tl + vp8_block2left[b]);
  556. }
  557. return cost;
  558. }
  559. static int rd_inter16x16_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
  560. int *distortion, int fullpixel) {
  561. (void)cpi;
  562. (void)fullpixel;
  563. vp8_build_inter16x16_predictors_mbuv(&x->e_mbd);
  564. vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
  565. x->src.uv_stride, &x->e_mbd.predictor[256],
  566. &x->e_mbd.predictor[320], 8);
  567. vp8_transform_mbuv(x);
  568. vp8_quantize_mbuv(x);
  569. *rate = rd_cost_mbuv(x);
  570. *distortion = vp8_mbuverror(x) / 4;
  571. return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
  572. }
  573. static int rd_inter4x4_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
  574. int *distortion, int fullpixel) {
  575. (void)cpi;
  576. (void)fullpixel;
  577. vp8_build_inter4x4_predictors_mbuv(&x->e_mbd);
  578. vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
  579. x->src.uv_stride, &x->e_mbd.predictor[256],
  580. &x->e_mbd.predictor[320], 8);
  581. vp8_transform_mbuv(x);
  582. vp8_quantize_mbuv(x);
  583. *rate = rd_cost_mbuv(x);
  584. *distortion = vp8_mbuverror(x) / 4;
  585. return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
  586. }
/* RD-search the four intra chroma modes (DC/V/H/TM), store the winner in
 * the MB's uv_mode and fill *rate (mode + tokens), *rate_tokenonly and
 * *distortion for it. */
static void rd_pick_intra_mbuv_mode(MACROBLOCK *x, int *rate,
                                    int *rate_tokenonly, int *distortion) {
  MB_PREDICTION_MODE mode;
  MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
  int best_rd = INT_MAX;
  int UNINITIALIZED_IS_SAFE(d), UNINITIALIZED_IS_SAFE(r);
  int rate_to;
  MACROBLOCKD *xd = &x->e_mbd;

  for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
    int this_rate;
    int this_distortion;
    int this_rd;

    xd->mode_info_context->mbmi.uv_mode = mode;

    /* Predict both chroma planes, then residual/transform/quantize. */
    vp8_build_intra_predictors_mbuv_s(
        xd, xd->dst.u_buffer - xd->dst.uv_stride,
        xd->dst.v_buffer - xd->dst.uv_stride, xd->dst.u_buffer - 1,
        xd->dst.v_buffer - 1, xd->dst.uv_stride, &xd->predictor[256],
        &xd->predictor[320], 8);

    vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
                      x->src.uv_stride, &xd->predictor[256],
                      &xd->predictor[320], 8);
    vp8_transform_mbuv(x);
    vp8_quantize_mbuv(x);

    rate_to = rd_cost_mbuv(x);
    /* Token rate plus the cost of signalling the chroma mode. */
    this_rate = rate_to +
                x->intra_uv_mode_cost[xd->frame_type]
                                     [xd->mode_info_context->mbmi.uv_mode];

    this_distortion = vp8_mbuverror(x) / 4;

    this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);

    if (this_rd < best_rd) {
      best_rd = this_rd;
      d = this_distortion;
      r = this_rate;
      *rate_tokenonly = rate_to;
      mode_selected = mode;
    }
  }

  *rate = r;
  *distortion = d;

  xd->mode_info_context->mbmi.uv_mode = mode_selected;
}
  628. int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]) {
  629. vp8_prob p[VP8_MVREFS - 1];
  630. assert(NEARESTMV <= m && m <= SPLITMV);
  631. vp8_mv_ref_probs(p, near_mv_ref_ct);
  632. return vp8_cost_token(vp8_mv_ref_tree, p,
  633. vp8_mv_ref_encoding_array + (m - NEARESTMV));
  634. }
  635. void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv) {
  636. x->e_mbd.mode_info_context->mbmi.mode = mb;
  637. x->e_mbd.mode_info_context->mbmi.mv.as_int = mv->as_int;
  638. }
/* Assign a prediction mode and motion vector to every 4x4 block carrying
 * 'which_label', and return the bit cost of signalling that choice.
 *
 * NEW4X4 charges the MV cost against best_ref_mv; LEFT4X4/ABOVE4X4 copy
 * the MV from the corresponding neighbour; ZERO4X4 uses the zero vector.
 * Mode costing happens only on a block that starts the label (does not
 * continue it from the left or above). */
static int labels2mode(MACROBLOCK *x, int const *labelings, int which_label,
                       B_PREDICTION_MODE this_mode, int_mv *this_mv,
                       int_mv *best_ref_mv, int *mvcost[2]) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mic = xd->mode_info_context;
  const int mis = xd->mode_info_stride;
  int cost = 0;
  int thismvcost = 0;
  /* We have to be careful retrieving previously-encoded motion vectors.
     Ones from this macroblock have to be pulled from the BLOCKD array
     as they have not yet made it to the bmi array in our MB_MODE_INFO. */
  int i = 0;
  do {
    BLOCKD *const d = xd->block + i;
    const int row = i >> 2, col = i & 3;
    B_PREDICTION_MODE m;
    if (labelings[i] != which_label) continue;
    /* A block that continues the label from its left/above neighbour
     * simply inherits that neighbour's MV via LEFT4X4/ABOVE4X4. */
    if (col && labelings[i] == labelings[i - 1]) {
      m = LEFT4X4;
    } else if (row && labelings[i] == labelings[i - 4]) {
      m = ABOVE4X4;
    } else {
      /* the only time we should do costing for new motion vector
       * or mode is when we are on a new label (jbb May 08, 2007)
       */
      switch (m = this_mode) {
        case NEW4X4:
          thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost, 102);
          break;
        case LEFT4X4:
          /* In-MB neighbours come from BLOCKD; MB-edge neighbours from
           * the neighbouring mode info. */
          this_mv->as_int = col ? d[-1].bmi.mv.as_int : left_block_mv(mic, i);
          break;
        case ABOVE4X4:
          this_mv->as_int =
              row ? d[-4].bmi.mv.as_int : above_block_mv(mic, i, mis);
          break;
        case ZERO4X4: this_mv->as_int = 0; break;
        default: break;
      }
      if (m == ABOVE4X4) /* replace above with left if same */
      {
        int_mv left_mv;
        left_mv.as_int = col ? d[-1].bmi.mv.as_int : left_block_mv(mic, i);
        if (left_mv.as_int == this_mv->as_int) m = LEFT4X4;
      }
      /* NOTE(review): assignment (not accumulation) — only the last
       * label-starting block's mode cost survives; matches upstream. */
      cost = x->inter_bmode_costs[m];
    }
    d->bmi.mv.as_int = this_mv->as_int;
    x->partition_info->bmi[i].mode = m;
    x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
  } while (++i < 16);
  cost += thismvcost;
  return cost;
}
  693. static int rdcost_mbsegment_y(MACROBLOCK *mb, const int *labels,
  694. int which_label, ENTROPY_CONTEXT *ta,
  695. ENTROPY_CONTEXT *tl) {
  696. int cost = 0;
  697. int b;
  698. MACROBLOCKD *x = &mb->e_mbd;
  699. for (b = 0; b < 16; ++b) {
  700. if (labels[b] == which_label) {
  701. cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_WITH_DC,
  702. ta + vp8_block2above[b], tl + vp8_block2left[b]);
  703. }
  704. }
  705. return cost;
  706. }
  707. static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x,
  708. int const *labels,
  709. int which_label) {
  710. int i;
  711. unsigned int distortion = 0;
  712. int pre_stride = x->e_mbd.pre.y_stride;
  713. unsigned char *base_pre = x->e_mbd.pre.y_buffer;
  714. for (i = 0; i < 16; ++i) {
  715. if (labels[i] == which_label) {
  716. BLOCKD *bd = &x->e_mbd.block[i];
  717. BLOCK *be = &x->block[i];
  718. vp8_build_inter_predictors_b(bd, 16, base_pre, pre_stride,
  719. x->e_mbd.subpixel_predict);
  720. vp8_subtract_b(be, bd, 16);
  721. x->short_fdct4x4(be->src_diff, be->coeff, 32);
  722. x->quantize_b(be, bd);
  723. distortion += vp8_block_error(be->coeff, bd->dqcoeff);
  724. }
  725. }
  726. return distortion;
  727. }
/* Down-shift applied to the best SAD before deciding whether a full search
 * is worthwhile; indexed by segmentation type (16x8, 8x16, 8x8, 4x4). */
static const unsigned int segmentation_to_sseshift[4] = { 3, 3, 2, 0 };
/* Running state for the SPLITMV partition search: the best segmentation
 * found so far plus everything needed to restore it afterwards. */
typedef struct {
  int_mv *ref_mv; /* reference MV: search centre and MV-cost anchor */
  int_mv mvp;     /* predictor MV used to seed each label's search */
  int segment_rd; /* best total RD cost over the segmentations tried */
  int segment_num; /* segmentation type that achieved segment_rd */
  int r;           /* total rate of the best segmentation */
  int d;           /* total distortion of the best segmentation */
  int segment_yrate; /* luma token rate of the best segmentation */
  B_PREDICTION_MODE modes[16]; /* per-4x4 sub-modes of the winner */
  int_mv mvs[16];              /* per-4x4 MVs of the winner */
  unsigned char eobs[16];      /* per-4x4 end-of-block markers */
  int mvthresh;  /* threshold gating per-label motion searches */
  int *mdcounts; /* mv-ref context counts for SPLITMV signalling cost */
  int_mv sv_mvp[4]; /* save 4 mvp from 8x8 */
  int sv_istep[2]; /* save 2 initial step_param for 16x8/8x16 */
} BEST_SEG_INFO;
/* Evaluate one SPLITMV segmentation (16x8, 8x16, 8x8 or 4x4): for each
 * label run a motion search and try every 4x4 sub-mode, keep the best mode
 * per label, and if the segmentation's total RD cost beats bsi->segment_rd
 * record the whole result (modes, MVs, eobs, rates) into *bsi. */
static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x, BEST_SEG_INFO *bsi,
                             unsigned int segmentation) {
  int i;
  int const *labels;
  int br = 0; /* accumulated rate for the whole segmentation */
  int bd = 0; /* accumulated distortion for the whole segmentation */
  B_PREDICTION_MODE this_mode;
  int label_count;
  int this_segment_rd = 0;
  int label_mv_thresh;
  int rate = 0;
  int sbr = 0; /* rate of the current label's best mode */
  int sbd = 0; /* distortion of the current label's best mode */
  int segmentyrate = 0;
  vp8_variance_fn_ptr_t *v_fn_ptr;
  /* t_above/t_left roll forward label by label; t_above_b/t_left_b hold
   * the contexts left by the current label's best candidate. */
  ENTROPY_CONTEXT_PLANES t_above, t_left;
  ENTROPY_CONTEXT *ta;
  ENTROPY_CONTEXT *tl;
  ENTROPY_CONTEXT_PLANES t_above_b, t_left_b;
  ENTROPY_CONTEXT *ta_b;
  ENTROPY_CONTEXT *tl_b;
  memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
  memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
  ta = (ENTROPY_CONTEXT *)&t_above;
  tl = (ENTROPY_CONTEXT *)&t_left;
  ta_b = (ENTROPY_CONTEXT *)&t_above_b;
  tl_b = (ENTROPY_CONTEXT *)&t_left_b;
  br = 0;
  bd = 0;
  v_fn_ptr = &cpi->fn_ptr[segmentation];
  labels = vp8_mbsplits[segmentation];
  label_count = vp8_mbsplit_count[segmentation];
  /* 64 makes this threshold really big effectively making it so that we
   * very rarely check mvs on segments. setting this to 1 would make mv
   * thresh roughly equal to what it is for macroblocks
   */
  label_mv_thresh = 1 * bsi->mvthresh / label_count;
  /* Segmentation method overheads */
  rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs,
                        vp8_mbsplit_encodings + segmentation);
  rate += vp8_cost_mv_ref(SPLITMV, bsi->mdcounts);
  this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
  br += rate;
  for (i = 0; i < label_count; ++i) {
    int_mv mode_mv[B_MODE_COUNT];
    int best_label_rd = INT_MAX;
    B_PREDICTION_MODE mode_selected = ZERO4X4;
    int bestlabelyrate = 0;
    /* search for the best motion vector on this segment */
    for (this_mode = LEFT4X4; this_mode <= NEW4X4; ++this_mode) {
      int this_rd;
      int distortion;
      int labelyrate;
      /* Scratch contexts so each candidate mode is costed against the
       * same starting entropy state. */
      ENTROPY_CONTEXT_PLANES t_above_s, t_left_s;
      ENTROPY_CONTEXT *ta_s;
      ENTROPY_CONTEXT *tl_s;
      memcpy(&t_above_s, &t_above, sizeof(ENTROPY_CONTEXT_PLANES));
      memcpy(&t_left_s, &t_left, sizeof(ENTROPY_CONTEXT_PLANES));
      ta_s = (ENTROPY_CONTEXT *)&t_above_s;
      tl_s = (ENTROPY_CONTEXT *)&t_left_s;
      if (this_mode == NEW4X4) {
        int sseshift;
        int num00;
        int step_param = 0;
        int further_steps;
        int n;
        int thissme;
        int bestsme = INT_MAX;
        int_mv temp_mv;
        BLOCK *c;
        BLOCKD *e;
        /* Is the best so far sufficiently good that we cant justify
         * doing a new motion search.
         */
        if (best_label_rd < label_mv_thresh) break;
        if (cpi->compressor_speed) {
          /* Seed the search with the 8x8 results saved in sv_mvp /
           * sv_istep (16x8/8x16) or the previous block's MV (4x4). */
          if (segmentation == BLOCK_8X16 || segmentation == BLOCK_16X8) {
            bsi->mvp.as_int = bsi->sv_mvp[i].as_int;
            if (i == 1 && segmentation == BLOCK_16X8) {
              bsi->mvp.as_int = bsi->sv_mvp[2].as_int;
            }
            step_param = bsi->sv_istep[i];
          }
          /* use previous block's result as next block's MV
           * predictor.
           */
          if (segmentation == BLOCK_4X4 && i > 0) {
            bsi->mvp.as_int = x->e_mbd.block[i - 1].bmi.mv.as_int;
            if (i == 4 || i == 8 || i == 12) {
              bsi->mvp.as_int = x->e_mbd.block[i - 4].bmi.mv.as_int;
            }
            step_param = 2;
          }
        }
        further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
        {
          int sadpb = x->sadperbit4;
          int_mv mvp_full;
          /* Full-pel predictor (MVs are stored in 1/8-pel units). */
          mvp_full.as_mv.row = bsi->mvp.as_mv.row >> 3;
          mvp_full.as_mv.col = bsi->mvp.as_mv.col >> 3;
          /* find first label */
          n = vp8_mbsplit_offset[segmentation][i];
          c = &x->block[n];
          e = &x->e_mbd.block[n];
          {
            bestsme = cpi->diamond_search_sad(
                x, c, e, &mvp_full, &mode_mv[NEW4X4], step_param, sadpb, &num00,
                v_fn_ptr, x->mvcost, bsi->ref_mv);
            /* Re-run the diamond search at successively finer steps,
             * skipping steps the first pass already covered (num00). */
            n = num00;
            num00 = 0;
            while (n < further_steps) {
              n++;
              if (num00) {
                num00--;
              } else {
                thissme = cpi->diamond_search_sad(
                    x, c, e, &mvp_full, &temp_mv, step_param + n, sadpb, &num00,
                    v_fn_ptr, x->mvcost, bsi->ref_mv);
                if (thissme < bestsme) {
                  bestsme = thissme;
                  mode_mv[NEW4X4].as_int = temp_mv.as_int;
                }
              }
            }
          }
          sseshift = segmentation_to_sseshift[segmentation];
          /* Should we do a full search (best quality only) */
          if ((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000) {
            /* Check if mvp_full is within the range. */
            vp8_clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max, x->mv_row_min,
                         x->mv_row_max);
            thissme = cpi->full_search_sad(x, c, e, &mvp_full, sadpb, 16,
                                           v_fn_ptr, x->mvcost, bsi->ref_mv);
            if (thissme < bestsme) {
              bestsme = thissme;
              mode_mv[NEW4X4].as_int = e->bmi.mv.as_int;
            } else {
              /* The full search result is actually worse so
               * re-instate the previous best vector
               */
              e->bmi.mv.as_int = mode_mv[NEW4X4].as_int;
            }
          }
        }
        /* Refine the winning full-pel vector to sub-pel accuracy. */
        if (bestsme < INT_MAX) {
          int disto;
          unsigned int sse;
          cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4], bsi->ref_mv,
                                       x->errorperbit, v_fn_ptr, x->mvcost,
                                       &disto, &sse);
        }
      } /* NEW4X4 */
      rate = labels2mode(x, labels, i, this_mode, &mode_mv[this_mode],
                         bsi->ref_mv, x->mvcost);
      /* Trap vectors that reach beyond the UMV borders */
      if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
          ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
          ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
          ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
        continue;
      }
      distortion = vp8_encode_inter_mb_segment(x, labels, i) / 4;
      labelyrate = rdcost_mbsegment_y(x, labels, i, ta_s, tl_s);
      rate += labelyrate;
      this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
      if (this_rd < best_label_rd) {
        sbr = rate;
        sbd = distortion;
        bestlabelyrate = labelyrate;
        mode_selected = this_mode;
        best_label_rd = this_rd;
        /* Remember the entropy contexts produced by this candidate. */
        memcpy(ta_b, ta_s, sizeof(ENTROPY_CONTEXT_PLANES));
        memcpy(tl_b, tl_s, sizeof(ENTROPY_CONTEXT_PLANES));
      }
    } /*for each 4x4 mode*/
    /* Commit the best candidate's contexts and re-apply its mode/MV. */
    memcpy(ta, ta_b, sizeof(ENTROPY_CONTEXT_PLANES));
    memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES));
    labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected],
                bsi->ref_mv, x->mvcost);
    br += sbr;
    bd += sbd;
    segmentyrate += bestlabelyrate;
    this_segment_rd += best_label_rd;
    /* Early out once this segmentation can no longer beat the best. */
    if (this_segment_rd >= bsi->segment_rd) break;
  } /* for each label */
  if (this_segment_rd < bsi->segment_rd) {
    bsi->r = br;
    bsi->d = bd;
    bsi->segment_yrate = segmentyrate;
    bsi->segment_rd = this_segment_rd;
    bsi->segment_num = segmentation;
    /* store everything needed to come back to this!! */
    for (i = 0; i < 16; ++i) {
      bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
      bsi->modes[i] = x->partition_info->bmi[i].mode;
      bsi->eobs[i] = x->e_mbd.eobs[i];
    }
  }
}
  944. static void vp8_cal_step_param(int sr, int *sp) {
  945. int step = 0;
  946. if (sr > MAX_FIRST_STEP) {
  947. sr = MAX_FIRST_STEP;
  948. } else if (sr < 1) {
  949. sr = 1;
  950. }
  951. while (sr >>= 1) step++;
  952. *sp = MAX_MVSEARCH_STEPS - 1 - step;
  953. }
/* Search the SPLITMV segmentations (16x8, 8x16, 8x8, 4x4) for the best
 * partitioning of this macroblock, install the winner's per-block modes
 * and MVs, and return its RD cost.
 *
 * In best-quality mode all four segmentations are tried in a fixed order;
 * otherwise 8x8 is tried first and its MVs seed the 16x8/8x16 (and
 * optionally 4x4) searches with a narrowed MV window. */
static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
                                           int_mv *best_ref_mv, int best_rd,
                                           int *mdcounts, int *returntotrate,
                                           int *returnyrate,
                                           int *returndistortion,
                                           int mvthresh) {
  int i;
  BEST_SEG_INFO bsi;
  memset(&bsi, 0, sizeof(bsi));
  bsi.segment_rd = best_rd;
  bsi.ref_mv = best_ref_mv;
  bsi.mvp.as_int = best_ref_mv->as_int;
  bsi.mvthresh = mvthresh;
  bsi.mdcounts = mdcounts;
  for (i = 0; i < 16; ++i) {
    bsi.modes[i] = ZERO4X4;
  }
  if (cpi->compressor_speed == 0) {
    /* for now, we will keep the original segmentation order
       when in best quality mode */
    rd_check_segment(cpi, x, &bsi, BLOCK_16X8);
    rd_check_segment(cpi, x, &bsi, BLOCK_8X16);
    rd_check_segment(cpi, x, &bsi, BLOCK_8X8);
    rd_check_segment(cpi, x, &bsi, BLOCK_4X4);
  } else {
    int sr;
    rd_check_segment(cpi, x, &bsi, BLOCK_8X8);
    /* Only continue if 8x8 actually improved on the incoming best_rd. */
    if (bsi.segment_rd < best_rd) {
      int col_min = ((best_ref_mv->as_mv.col + 7) >> 3) - MAX_FULL_PEL_VAL;
      int row_min = ((best_ref_mv->as_mv.row + 7) >> 3) - MAX_FULL_PEL_VAL;
      int col_max = (best_ref_mv->as_mv.col >> 3) + MAX_FULL_PEL_VAL;
      int row_max = (best_ref_mv->as_mv.row >> 3) + MAX_FULL_PEL_VAL;
      /* Saved so the UMV window can be restored afterwards. */
      int tmp_col_min = x->mv_col_min;
      int tmp_col_max = x->mv_col_max;
      int tmp_row_min = x->mv_row_min;
      int tmp_row_max = x->mv_row_max;
      /* Get intersection of UMV window and valid MV window to reduce # of
       * checks in diamond search. */
      if (x->mv_col_min < col_min) x->mv_col_min = col_min;
      if (x->mv_col_max > col_max) x->mv_col_max = col_max;
      if (x->mv_row_min < row_min) x->mv_row_min = row_min;
      if (x->mv_row_max > row_max) x->mv_row_max = row_max;
      /* Get 8x8 result: the four quadrant MVs (blocks 0, 2, 8, 10). */
      bsi.sv_mvp[0].as_int = bsi.mvs[0].as_int;
      bsi.sv_mvp[1].as_int = bsi.mvs[2].as_int;
      bsi.sv_mvp[2].as_int = bsi.mvs[8].as_int;
      bsi.sv_mvp[3].as_int = bsi.mvs[10].as_int;
      /* Use 8x8 result as 16x8/8x16's predictor MV. Adjust search range
       * according to the closeness of 2 MV. */
      /* block 8X16 */
      {
        sr =
            MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[2].as_mv.row)) >> 3,
                 (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[2].as_mv.col)) >> 3);
        vp8_cal_step_param(sr, &bsi.sv_istep[0]);
        sr =
            MAXF((abs(bsi.sv_mvp[1].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3,
                 (abs(bsi.sv_mvp[1].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3);
        vp8_cal_step_param(sr, &bsi.sv_istep[1]);
        rd_check_segment(cpi, x, &bsi, BLOCK_8X16);
      }
      /* block 16X8 */
      {
        sr =
            MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[1].as_mv.row)) >> 3,
                 (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[1].as_mv.col)) >> 3);
        vp8_cal_step_param(sr, &bsi.sv_istep[0]);
        sr =
            MAXF((abs(bsi.sv_mvp[2].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3,
                 (abs(bsi.sv_mvp[2].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3);
        vp8_cal_step_param(sr, &bsi.sv_istep[1]);
        rd_check_segment(cpi, x, &bsi, BLOCK_16X8);
      }
      /* If 8x8 is better than 16x8/8x16, then do 4x4 search */
      /* Not skip 4x4 if speed=0 (good quality) */
      if (cpi->sf.no_skip_block4x4_search || bsi.segment_num == BLOCK_8X8)
      /* || (sv_segment_rd8x8-bsi.segment_rd) < sv_segment_rd8x8>>5) */
      {
        bsi.mvp.as_int = bsi.sv_mvp[0].as_int;
        rd_check_segment(cpi, x, &bsi, BLOCK_4X4);
      }
      /* restore UMV window */
      x->mv_col_min = tmp_col_min;
      x->mv_col_max = tmp_col_max;
      x->mv_row_min = tmp_row_min;
      x->mv_row_max = tmp_row_max;
    }
  }
  /* set it to the best */
  for (i = 0; i < 16; ++i) {
    BLOCKD *bd = &x->e_mbd.block[i];
    bd->bmi.mv.as_int = bsi.mvs[i].as_int;
    *bd->eob = bsi.eobs[i];
  }
  *returntotrate = bsi.r;
  *returndistortion = bsi.d;
  *returnyrate = bsi.segment_yrate;
  /* save partitions */
  x->e_mbd.mode_info_context->mbmi.partitioning = bsi.segment_num;
  x->partition_info->count = vp8_mbsplit_count[bsi.segment_num];
  for (i = 0; i < x->partition_info->count; ++i) {
    int j;
    j = vp8_mbsplit_offset[bsi.segment_num][i];
    x->partition_info->bmi[i].mode = bsi.modes[j];
    x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv;
  }
  /*
   * used to set x->e_mbd.mode_info_context->mbmi.mv.as_int
   */
  x->partition_info->bmi[15].mv.as_int = bsi.mvs[15].as_int;
  return bsi.segment_rd;
}
/* The improved MV prediction */
/* Build an MV predictor for the current MB from up to 8 candidates —
 * 3 from the current frame (above, left, above-left) and 5 co-located
 * neighbours from the last frame — preferring, in SAD order
 * (near_sadidx, produced by vp8_cal_sad), the first candidate whose
 * reference frame matches; otherwise the component-wise median of all
 * candidates.  *sr gets a suggested search-range level (0 = caller
 * decides). */
void vp8_mv_pred(VP8_COMP *cpi, MACROBLOCKD *xd, const MODE_INFO *here,
                 int_mv *mvp, int refframe, int *ref_frame_sign_bias, int *sr,
                 int near_sadidx[]) {
  const MODE_INFO *above = here - xd->mode_info_stride;
  const MODE_INFO *left = here - 1;
  const MODE_INFO *aboveleft = above - 1;
  int_mv near_mvs[8];
  int near_ref[8];
  int_mv mv;
  int vcnt = 0;
  int find = 0;
  int mb_offset;
  int mvx[8];
  int mvy[8];
  int i;
  mv.as_int = 0;
  if (here->mbmi.ref_frame != INTRA_FRAME) {
    /* Candidate slots default to zero MV / intra (i.e. "unavailable");
     * vcnt always advances so slot indices stay fixed:
     * 0=cf above, 1=cf left, 2=cf aboveleft, 3..7=lf neighbours. */
    near_mvs[0].as_int = near_mvs[1].as_int = near_mvs[2].as_int =
        near_mvs[3].as_int = near_mvs[4].as_int = near_mvs[5].as_int =
            near_mvs[6].as_int = near_mvs[7].as_int = 0;
    near_ref[0] = near_ref[1] = near_ref[2] = near_ref[3] = near_ref[4] =
        near_ref[5] = near_ref[6] = near_ref[7] = 0;
    /* read in 3 nearby block's MVs from current frame as prediction
     * candidates.
     */
    if (above->mbmi.ref_frame != INTRA_FRAME) {
      near_mvs[vcnt].as_int = above->mbmi.mv.as_int;
      mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], refframe,
              &near_mvs[vcnt], ref_frame_sign_bias);
      near_ref[vcnt] = above->mbmi.ref_frame;
    }
    vcnt++;
    if (left->mbmi.ref_frame != INTRA_FRAME) {
      near_mvs[vcnt].as_int = left->mbmi.mv.as_int;
      mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], refframe,
              &near_mvs[vcnt], ref_frame_sign_bias);
      near_ref[vcnt] = left->mbmi.ref_frame;
    }
    vcnt++;
    if (aboveleft->mbmi.ref_frame != INTRA_FRAME) {
      near_mvs[vcnt].as_int = aboveleft->mbmi.mv.as_int;
      mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], refframe,
              &near_mvs[vcnt], ref_frame_sign_bias);
      near_ref[vcnt] = aboveleft->mbmi.ref_frame;
    }
    vcnt++;
    /* read in 5 nearby block's MVs from last frame. */
    if (cpi->common.last_frame_type != KEY_FRAME) {
      /* Index of this MB in the last frame's per-MB arrays; edges are in
       * 1/8-pel units, 128 per 16-pel MB. */
      mb_offset = (-xd->mb_to_top_edge / 128 + 1) * (xd->mode_info_stride + 1) +
                  (-xd->mb_to_left_edge / 128 + 1);
      /* current in last frame */
      if (cpi->lf_ref_frame[mb_offset] != INTRA_FRAME) {
        near_mvs[vcnt].as_int = cpi->lfmv[mb_offset].as_int;
        mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset], refframe,
                &near_mvs[vcnt], ref_frame_sign_bias);
        near_ref[vcnt] = cpi->lf_ref_frame[mb_offset];
      }
      vcnt++;
      /* above in last frame */
      if (cpi->lf_ref_frame[mb_offset - xd->mode_info_stride - 1] !=
          INTRA_FRAME) {
        near_mvs[vcnt].as_int =
            cpi->lfmv[mb_offset - xd->mode_info_stride - 1].as_int;
        mv_bias(
            cpi->lf_ref_frame_sign_bias[mb_offset - xd->mode_info_stride - 1],
            refframe, &near_mvs[vcnt], ref_frame_sign_bias);
        near_ref[vcnt] =
            cpi->lf_ref_frame[mb_offset - xd->mode_info_stride - 1];
      }
      vcnt++;
      /* left in last frame */
      if (cpi->lf_ref_frame[mb_offset - 1] != INTRA_FRAME) {
        near_mvs[vcnt].as_int = cpi->lfmv[mb_offset - 1].as_int;
        mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset - 1], refframe,
                &near_mvs[vcnt], ref_frame_sign_bias);
        near_ref[vcnt] = cpi->lf_ref_frame[mb_offset - 1];
      }
      vcnt++;
      /* right in last frame */
      if (cpi->lf_ref_frame[mb_offset + 1] != INTRA_FRAME) {
        near_mvs[vcnt].as_int = cpi->lfmv[mb_offset + 1].as_int;
        mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset + 1], refframe,
                &near_mvs[vcnt], ref_frame_sign_bias);
        near_ref[vcnt] = cpi->lf_ref_frame[mb_offset + 1];
      }
      vcnt++;
      /* below in last frame */
      if (cpi->lf_ref_frame[mb_offset + xd->mode_info_stride + 1] !=
          INTRA_FRAME) {
        near_mvs[vcnt].as_int =
            cpi->lfmv[mb_offset + xd->mode_info_stride + 1].as_int;
        mv_bias(
            cpi->lf_ref_frame_sign_bias[mb_offset + xd->mode_info_stride + 1],
            refframe, &near_mvs[vcnt], ref_frame_sign_bias);
        near_ref[vcnt] =
            cpi->lf_ref_frame[mb_offset + xd->mode_info_stride + 1];
      }
      vcnt++;
    }
    /* Walk candidates in increasing-SAD order; take the first whose
     * reference frame matches the current MB's. */
    for (i = 0; i < vcnt; ++i) {
      if (near_ref[near_sadidx[i]] != INTRA_FRAME) {
        if (here->mbmi.ref_frame == near_ref[near_sadidx[i]]) {
          mv.as_int = near_mvs[near_sadidx[i]].as_int;
          find = 1;
          if (i < 3) {
            *sr = 3;
          } else {
            *sr = 2;
          }
          break;
        }
      }
    }
    if (!find) {
      /* No matching reference: fall back to the component-wise median. */
      for (i = 0; i < vcnt; ++i) {
        mvx[i] = near_mvs[i].as_mv.row;
        mvy[i] = near_mvs[i].as_mv.col;
      }
      insertsortmv(mvx, vcnt);
      insertsortmv(mvy, vcnt);
      mv.as_mv.row = mvx[vcnt / 2];
      mv.as_mv.col = mvy[vcnt / 2];
      /* sr is set to 0 to allow calling function to decide the search
       * range.
       */
      *sr = 0;
    }
  }
  /* Set up return values */
  mvp->as_int = mv.as_int;
  vp8_clamp_mv2(mvp, xd);
}
/* Compute SADs between the current MB's source and its spatial neighbours
 * (3 in the current frame, 5 co-located in the last frame), then sort the
 * candidate indices by SAD into near_sadidx[] for use by vp8_mv_pred().
 * Unavailable neighbours (frame edges) get INT_MAX so they sort last. */
void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x,
                 int recon_yoffset, int near_sadidx[]) {
  /* near_sad indexes:
   * 0-cf above, 1-cf left, 2-cf aboveleft,
   * 3-lf current, 4-lf above, 5-lf left, 6-lf right, 7-lf below
   */
  int near_sad[8] = { 0 };
  BLOCK *b = &x->block[0];
  unsigned char *src_y_ptr = *(b->base_src);
  /* calculate sad for current frame 3 nearby MBs. */
  if (xd->mb_to_top_edge == 0 && xd->mb_to_left_edge == 0) {
    /* Top-left corner: no current-frame neighbours at all. */
    near_sad[0] = near_sad[1] = near_sad[2] = INT_MAX;
  } else if (xd->mb_to_top_edge ==
             0) { /* only has left MB for sad calculation. */
    near_sad[0] = near_sad[2] = INT_MAX;
    near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf(
        src_y_ptr, b->src_stride, xd->dst.y_buffer - 16, xd->dst.y_stride);
  } else if (xd->mb_to_left_edge ==
             0) { /* only has above MB for sad calculation. */
    near_sad[1] = near_sad[2] = INT_MAX;
    near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf(
        src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride * 16,
        xd->dst.y_stride);
  } else {
    /* Interior MB: above, left and above-left are all available. */
    near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf(
        src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride * 16,
        xd->dst.y_stride);
    near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf(
        src_y_ptr, b->src_stride, xd->dst.y_buffer - 16, xd->dst.y_stride);
    near_sad[2] = cpi->fn_ptr[BLOCK_16X16].sdf(
        src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride * 16 - 16,
        xd->dst.y_stride);
  }
  if (cpi->common.last_frame_type != KEY_FRAME) {
    /* calculate sad for last frame 5 nearby MBs. */
    unsigned char *pre_y_buffer =
        cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_buffer + recon_yoffset;
    int pre_y_stride = cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_stride;
    /* Mark off-frame neighbours unavailable before computing the rest. */
    if (xd->mb_to_top_edge == 0) near_sad[4] = INT_MAX;
    if (xd->mb_to_left_edge == 0) near_sad[5] = INT_MAX;
    if (xd->mb_to_right_edge == 0) near_sad[6] = INT_MAX;
    if (xd->mb_to_bottom_edge == 0) near_sad[7] = INT_MAX;
    if (near_sad[4] != INT_MAX) {
      near_sad[4] = cpi->fn_ptr[BLOCK_16X16].sdf(
          src_y_ptr, b->src_stride, pre_y_buffer - pre_y_stride * 16,
          pre_y_stride);
    }
    if (near_sad[5] != INT_MAX) {
      near_sad[5] = cpi->fn_ptr[BLOCK_16X16].sdf(
          src_y_ptr, b->src_stride, pre_y_buffer - 16, pre_y_stride);
    }
    near_sad[3] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride,
                                               pre_y_buffer, pre_y_stride);
    if (near_sad[6] != INT_MAX) {
      near_sad[6] = cpi->fn_ptr[BLOCK_16X16].sdf(
          src_y_ptr, b->src_stride, pre_y_buffer + 16, pre_y_stride);
    }
    if (near_sad[7] != INT_MAX) {
      near_sad[7] = cpi->fn_ptr[BLOCK_16X16].sdf(
          src_y_ptr, b->src_stride, pre_y_buffer + pre_y_stride * 16,
          pre_y_stride);
    }
  }
  /* Sort all 8 candidates when last-frame data exists, otherwise just the
   * 3 current-frame ones. */
  if (cpi->common.last_frame_type != KEY_FRAME) {
    insertsortsad(near_sad, near_sadidx, 8);
  } else {
    insertsortsad(near_sad, near_sadidx, 3);
  }
}
  1268. static void rd_update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv) {
  1269. if (x->e_mbd.mode_info_context->mbmi.mode == SPLITMV) {
  1270. int i;
  1271. for (i = 0; i < x->partition_info->count; ++i) {
  1272. if (x->partition_info->bmi[i].mode == NEW4X4) {
  1273. x->MVcount[0][mv_max + ((x->partition_info->bmi[i].mv.as_mv.row -
  1274. best_ref_mv->as_mv.row) >>
  1275. 1)]++;
  1276. x->MVcount[1][mv_max + ((x->partition_info->bmi[i].mv.as_mv.col -
  1277. best_ref_mv->as_mv.col) >>
  1278. 1)]++;
  1279. }
  1280. }
  1281. } else if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV) {
  1282. x->MVcount[0][mv_max + ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row -
  1283. best_ref_mv->as_mv.row) >>
  1284. 1)]++;
  1285. x->MVcount[1][mv_max + ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col -
  1286. best_ref_mv->as_mv.col) >>
  1287. 1)]++;
  1288. }
  1289. }
/* Compute the inter-mode rate/distortion contribution for the current
 * 16x16 luma prediction.  May decide the MB can be skipped (returns the
 * skip RDCOST and sets *disable_skip); otherwise accumulates mode, Y and
 * UV rate/distortion into *rd and returns INT_MAX so the caller computes
 * the final RD itself. */
static int evaluate_inter_mode_rd(int mdcounts[4], RATE_DISTORTION *rd,
                                  int *disable_skip, VP8_COMP *cpi,
                                  MACROBLOCK *x) {
  MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode;
  BLOCK *b = &x->block[0];
  MACROBLOCKD *xd = &x->e_mbd;
  int distortion;
  vp8_build_inter16x16_predictors_mby(&x->e_mbd, x->e_mbd.predictor, 16);
  /* Inactive map region: force skip without any further analysis. */
  if (cpi->active_map_enabled && x->active_ptr[0] == 0) {
    x->skip = 1;
  } else if (x->encode_breakout) {
    unsigned int sse;
    unsigned int var;
    /* Breakout threshold derived from the AC quantizer, floored at the
     * user-configured encode_breakout. */
    unsigned int threshold =
        (xd->block[0].dequant[1] * xd->block[0].dequant[1] >> 4);
    if (threshold < x->encode_breakout) threshold = x->encode_breakout;
    var = vpx_variance16x16(*(b->base_src), b->src_stride, x->e_mbd.predictor,
                            16, &sse);
    if (sse < threshold) {
      unsigned int q2dc = xd->block[24].dequant[0];
      /* If theres is no codeable 2nd order dc
         or a very small uniform pixel change change */
      /* NOTE(review): by C precedence this parses as
       * (sse - var) < ((q2dc * q2dc) >> 4). */
      if ((sse - var<q2dc * q2dc>> 4) || (sse / 2 > var && sse - var < 64)) {
        /* Check u and v to make sure skip is ok */
        unsigned int sse2 = VP8_UVSSE(x);
        if (sse2 * 2 < threshold) {
          x->skip = 1;
          rd->distortion2 = sse + sse2;
          rd->rate2 = 500;
          /* for best_yrd calculation */
          rd->rate_uv = 0;
          rd->distortion_uv = sse2;
          *disable_skip = 1;
          return RDCOST(x->rdmult, x->rddiv, rd->rate2, rd->distortion2);
        }
      }
    }
  }
  /* Add in the Mv/mode cost */
  rd->rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
  /* Y cost and distortion */
  macro_block_yrd(x, &rd->rate_y, &distortion);
  rd->rate2 += rd->rate_y;
  rd->distortion2 += distortion;
  /* UV cost and distortion */
  rd_inter16x16_uv(cpi, x, &rd->rate_uv, &rd->distortion_uv,
                   cpi->common.full_pixel);
  rd->rate2 += rd->rate_uv;
  rd->distortion2 += rd->distortion_uv;
  return INT_MAX;
}
/* Finalize a mode's RD cost: add skip-flag and reference-frame signalling
 * costs, and if the block would code with zero coefficients (skip active)
 * back out the token rates in favour of the skip-flag cost.  Returns the
 * final RD estimate (with intra penalty applied for intra modes), or the
 * incoming this_rd unchanged when disable_skip is set. */
static int calculate_final_rd_costs(int this_rd, RATE_DISTORTION *rd,
                                    int *other_cost, int disable_skip,
                                    int uv_intra_tteob, int intra_rd_penalty,
                                    VP8_COMP *cpi, MACROBLOCK *x) {
  MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode;
  /* Where skip is allowable add in the default per mb cost for the no
   * skip case. where we then decide to skip we have to delete this and
   * replace it with the cost of signalling a skip
   */
  if (cpi->common.mb_no_coeff_skip) {
    *other_cost += vp8_cost_bit(cpi->prob_skip_false, 0);
    rd->rate2 += *other_cost;
  }
  /* Estimate the reference frame signaling cost and add it
   * to the rolling cost variable.
   */
  rd->rate2 += x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
  if (!disable_skip) {
    /* Test for the condition where skip block will be activated
     * because there are no non zero coefficients and make any
     * necessary adjustment for rate
     */
    if (cpi->common.mb_no_coeff_skip) {
      int i;
      int tteob;
      /* Modes other than SPLITMV/B_PRED carry a Y2 (2nd-order DC) block,
       * whose DC eob position is excluded from the per-block counts. */
      int has_y2_block = (this_mode != SPLITMV && this_mode != B_PRED);
      tteob = 0;
      if (has_y2_block) tteob += x->e_mbd.eobs[24];
      for (i = 0; i < 16; ++i) tteob += (x->e_mbd.eobs[i] > has_y2_block);
      /* Chroma eobs: inter uses the just-coded blocks, intra reuses the
       * cached UV-intra total. */
      if (x->e_mbd.mode_info_context->mbmi.ref_frame) {
        for (i = 16; i < 24; ++i) tteob += x->e_mbd.eobs[i];
      } else {
        tteob += uv_intra_tteob;
      }
      if (tteob == 0) {
        rd->rate2 -= (rd->rate_y + rd->rate_uv);
        /* for best_yrd calculation */
        rd->rate_uv = 0;
        /* Back out no skip flag costing and add in skip flag costing */
        if (cpi->prob_skip_false) {
          int prob_skip_cost;
          prob_skip_cost = vp8_cost_bit(cpi->prob_skip_false, 1);
          prob_skip_cost -= (int)vp8_cost_bit(cpi->prob_skip_false, 0);
          rd->rate2 += prob_skip_cost;
          *other_cost += prob_skip_cost;
        }
      }
    }
    /* Calculate the final RD estimate for this mode */
    this_rd = RDCOST(x->rdmult, x->rddiv, rd->rate2, rd->distortion2);
    if (this_rd < INT_MAX &&
        x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
      this_rd += intra_rd_penalty;
    }
  }
  return this_rd;
}
  1398. static void update_best_mode(BEST_MODE *best_mode, int this_rd,
  1399. RATE_DISTORTION *rd, int other_cost,
  1400. MACROBLOCK *x) {
  1401. MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode;
  1402. other_cost += x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
  1403. /* Calculate the final y RD estimate for this mode */
  1404. best_mode->yrd =
  1405. RDCOST(x->rdmult, x->rddiv, (rd->rate2 - rd->rate_uv - other_cost),
  1406. (rd->distortion2 - rd->distortion_uv));
  1407. best_mode->rd = this_rd;
  1408. memcpy(&best_mode->mbmode, &x->e_mbd.mode_info_context->mbmi,
  1409. sizeof(MB_MODE_INFO));
  1410. memcpy(&best_mode->partition, x->partition_info, sizeof(PARTITION_INFO));
  1411. if ((this_mode == B_PRED) || (this_mode == SPLITMV)) {
  1412. int i;
  1413. for (i = 0; i < 16; ++i) {
  1414. best_mode->bmodes[i] = x->e_mbd.block[i].bmi;
  1415. }
  1416. }
  1417. }
/* RD-based mode decision for one macroblock on an inter frame.
 *
 * Loops over the candidate mode/reference-frame pairs in vp8_mode_order /
 * vp8_ref_frame_order, computing a rate-distortion cost for each and keeping
 * the best in best_mode. Intra (B_PRED, DC/V/H/TM_PRED), inter single-mv
 * (NEARESTMV, NEARMV, ZEROMV, NEWMV with a diamond + refining + subpel motion
 * search) and split (SPLITMV) modes are all considered. Adaptive per-mode
 * thresholds (x->rd_threshes / x->rd_thresh_mult) are updated as modes win or
 * lose so that unproductive modes are tested less often on later MBs.
 *
 * Outputs: the winning mode is written back into x->e_mbd.mode_info_context,
 * *returnrate / *returndistortion get the winner's rate and distortion, and
 * *returnintra gets the distortion of the best intra mode seen (INT_MAX if
 * none was evaluated).
 */
void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
                            int recon_uvoffset, int *returnrate,
                            int *returndistortion, int *returnintra, int mb_row,
                            int mb_col) {
  BLOCK *b = &x->block[0];
  BLOCKD *d = &x->e_mbd.block[0];
  MACROBLOCKD *xd = &x->e_mbd;
  /* Two copies of the near/best ref mvs, one per sign bias; mode_mv /
   * best_ref_mv point at the copy matching the current reference frame.
   */
  int_mv best_ref_mv_sb[2];
  int_mv mode_mv_sb[2][MB_MODE_COUNT];
  int_mv best_ref_mv;
  int_mv *mode_mv;
  MB_PREDICTION_MODE this_mode;
  int num00;
  int best_mode_index = 0;
  BEST_MODE best_mode;

  int i;
  int mode_index;
  int mdcounts[4];
  int rate;
  RATE_DISTORTION rd;
  int uv_intra_rate, uv_intra_distortion, uv_intra_rate_tokenonly;
  int uv_intra_tteob = 0;
  /* UV intra is evaluated once (lazily) and its results reused for every
   * intra luma mode.
   */
  int uv_intra_done = 0;
  MB_PREDICTION_MODE uv_intra_mode = 0;
  int_mv mvp;
  int near_sadidx[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
  int saddone = 0;
  /* search range got from mv_pred(). It uses step_param levels. (0-7) */
  int sr = 0;

  unsigned char *plane[4][3];
  int ref_frame_map[4];
  int sign_bias = 0;

  /* Flat penalty added to intra modes so inter is preferred at equal cost;
   * scaled by the DC quantizer so it tracks the operating point.
   */
  int intra_rd_penalty =
      10 * vp8_dc_quant(cpi->common.base_qindex, cpi->common.y1dc_delta_q);

#if CONFIG_TEMPORAL_DENOISING
  unsigned int zero_mv_sse = UINT_MAX, best_sse = UINT_MAX,
               best_rd_sse = UINT_MAX;
#endif

  mode_mv = mode_mv_sb[sign_bias];
  best_ref_mv.as_int = 0;
  best_mode.rd = INT_MAX;
  best_mode.yrd = INT_MAX;
  best_mode.intra_rd = INT_MAX;
  memset(mode_mv_sb, 0, sizeof(mode_mv_sb));
  memset(&best_mode.mbmode, 0, sizeof(best_mode.mbmode));
  memset(&best_mode.bmodes, 0, sizeof(best_mode.bmodes));

  /* Setup search priorities */
  get_reference_search_order(cpi, ref_frame_map);

  /* Check to see if there is at least 1 valid reference frame that we need
   * to calculate near_mvs.
   */
  if (ref_frame_map[1] > 0) {
    sign_bias = vp8_find_near_mvs_bias(
        &x->e_mbd, x->e_mbd.mode_info_context, mode_mv_sb, best_ref_mv_sb,
        mdcounts, ref_frame_map[1], cpi->common.ref_frame_sign_bias);

    mode_mv = mode_mv_sb[sign_bias];
    best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
  }

  get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);

  *returnintra = INT_MAX;
  /* Count of the number of MBs tested so far this frame */
  x->mbs_tested_so_far++;

  x->skip = 0;

  for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
    int this_rd = INT_MAX;
    int disable_skip = 0;
    int other_cost = 0;
    int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]];

    /* Test best rd so far against threshold for trying this mode. */
    if (best_mode.rd <= x->rd_threshes[mode_index]) continue;

    if (this_ref_frame < 0) continue;

    /* These variables hold our rolling total cost and distortion for
     * this mode
     */
    rd.rate2 = 0;
    rd.distortion2 = 0;

    this_mode = vp8_mode_order[mode_index];

    x->e_mbd.mode_info_context->mbmi.mode = this_mode;
    x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;

    /* Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
     * unless ARNR filtering is enabled in which case we want
     * an unfiltered alternative
     */
    if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
      if (this_mode != ZEROMV ||
          x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME) {
        continue;
      }
    }

    /* everything but intra */
    if (x->e_mbd.mode_info_context->mbmi.ref_frame) {
      x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
      x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
      x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];

      /* Switch to the mv set matching this reference frame's sign bias. */
      if (sign_bias != cpi->common.ref_frame_sign_bias[this_ref_frame]) {
        sign_bias = cpi->common.ref_frame_sign_bias[this_ref_frame];
        mode_mv = mode_mv_sb[sign_bias];
        best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int;
      }
    }

    /* Check to see if the testing frequency for this mode is at its
     * max If so then prevent it from being tested and increase the
     * threshold for its testing
     */
    if (x->mode_test_hit_counts[mode_index] &&
        (cpi->mode_check_freq[mode_index] > 1)) {
      if (x->mbs_tested_so_far <= cpi->mode_check_freq[mode_index] *
                                      x->mode_test_hit_counts[mode_index]) {
        /* Increase the threshold for coding this mode to make it
         * less likely to be chosen
         */
        x->rd_thresh_mult[mode_index] += 4;

        if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
          x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
        }

        x->rd_threshes[mode_index] =
            (cpi->rd_baseline_thresh[mode_index] >> 7) *
            x->rd_thresh_mult[mode_index];

        continue;
      }
    }

    /* We have now reached the point where we are going to test the
     * current mode so increment the counter for the number of times
     * it has been tested
     */
    x->mode_test_hit_counts[mode_index]++;

    /* Experimental code. Special case for gf and arf zeromv modes.
     * Increase zbin size to suppress noise
     */
    if (x->zbin_mode_boost_enabled) {
      if (this_ref_frame == INTRA_FRAME) {
        x->zbin_mode_boost = 0;
      } else {
        if (vp8_mode_order[mode_index] == ZEROMV) {
          if (this_ref_frame != LAST_FRAME) {
            x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
          } else {
            x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
          }
        } else if (vp8_mode_order[mode_index] == SPLITMV) {
          x->zbin_mode_boost = 0;
        } else {
          x->zbin_mode_boost = MV_ZBIN_BOOST;
        }
      }

      vp8_update_zbin_extra(cpi, x);
    }

    /* Lazily evaluate UV intra the first time an intra mode comes up. */
    if (!uv_intra_done && this_ref_frame == INTRA_FRAME) {
      rd_pick_intra_mbuv_mode(x, &uv_intra_rate, &uv_intra_rate_tokenonly,
                              &uv_intra_distortion);
      uv_intra_mode = x->e_mbd.mode_info_context->mbmi.uv_mode;

      /*
       * Total of the eobs is used later to further adjust rate2. Since uv
       * block's intra eobs will be overwritten when we check inter modes,
       * we need to save uv_intra_tteob here.
       */
      for (i = 16; i < 24; ++i) uv_intra_tteob += x->e_mbd.eobs[i];

      uv_intra_done = 1;
    }

    switch (this_mode) {
      case B_PRED: {
        int tmp_rd;

        /* Note the rate value returned here includes the cost of
         * coding the BPRED mode: x->mbmode_cost[x->e_mbd.frame_type][BPRED]
         */
        int distortion;
        tmp_rd = rd_pick_intra4x4mby_modes(x, &rate, &rd.rate_y, &distortion,
                                           best_mode.yrd);
        rd.rate2 += rate;
        rd.distortion2 += distortion;

        /* Only add the UV cost if the luma-only RD can still win. */
        if (tmp_rd < best_mode.yrd) {
          rd.rate2 += uv_intra_rate;
          rd.rate_uv = uv_intra_rate_tokenonly;
          rd.distortion2 += uv_intra_distortion;
          rd.distortion_uv = uv_intra_distortion;
        } else {
          this_rd = INT_MAX;
          disable_skip = 1;
        }
        break;
      }

      case SPLITMV: {
        int tmp_rd;
        int this_rd_thresh;
        int distortion;

        /* Use the NEWMV threshold matching this reference frame. */
        this_rd_thresh = (vp8_ref_frame_order[mode_index] == 1)
                             ? x->rd_threshes[THR_NEW1]
                             : x->rd_threshes[THR_NEW3];
        this_rd_thresh = (vp8_ref_frame_order[mode_index] == 2)
                             ? x->rd_threshes[THR_NEW2]
                             : this_rd_thresh;

        tmp_rd = vp8_rd_pick_best_mbsegmentation(
            cpi, x, &best_ref_mv, best_mode.yrd, mdcounts, &rate, &rd.rate_y,
            &distortion, this_rd_thresh);

        rd.rate2 += rate;
        rd.distortion2 += distortion;

        /* If even the 'Y' rd value of split is higher than best so far
         * then don't bother looking at UV
         */
        if (tmp_rd < best_mode.yrd) {
          /* Now work out UV cost and add it in */
          rd_inter4x4_uv(cpi, x, &rd.rate_uv, &rd.distortion_uv,
                         cpi->common.full_pixel);
          rd.rate2 += rd.rate_uv;
          rd.distortion2 += rd.distortion_uv;
        } else {
          this_rd = INT_MAX;
          disable_skip = 1;
        }
        break;
      }

      case DC_PRED:
      case V_PRED:
      case H_PRED:
      case TM_PRED: {
        int distortion;
        x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;

        vp8_build_intra_predictors_mby_s(
            xd, xd->dst.y_buffer - xd->dst.y_stride, xd->dst.y_buffer - 1,
            xd->dst.y_stride, xd->predictor, 16);
        macro_block_yrd(x, &rd.rate_y, &distortion);
        rd.rate2 += rd.rate_y;
        rd.distortion2 += distortion;
        rd.rate2 += x->mbmode_cost[x->e_mbd.frame_type]
                                  [x->e_mbd.mode_info_context->mbmi.mode];
        rd.rate2 += uv_intra_rate;
        rd.rate_uv = uv_intra_rate_tokenonly;
        rd.distortion2 += uv_intra_distortion;
        rd.distortion_uv = uv_intra_distortion;
        break;
      }

      case NEWMV: {
        int thissme;
        int bestsme = INT_MAX;
        int step_param = cpi->sf.first_step;
        int further_steps;
        int n;
        /* If last step (1-away) of n-step search doesn't pick the center point
           as the best match, we will do a final 1-away diamond refining search
        */
        int do_refine = 1;

        int sadpb = x->sadperbit16;
        int_mv mvp_full;

        int col_min = ((best_ref_mv.as_mv.col + 7) >> 3) - MAX_FULL_PEL_VAL;
        int row_min = ((best_ref_mv.as_mv.row + 7) >> 3) - MAX_FULL_PEL_VAL;
        int col_max = (best_ref_mv.as_mv.col >> 3) + MAX_FULL_PEL_VAL;
        int row_max = (best_ref_mv.as_mv.row >> 3) + MAX_FULL_PEL_VAL;

        int tmp_col_min = x->mv_col_min;
        int tmp_col_max = x->mv_col_max;
        int tmp_row_min = x->mv_row_min;
        int tmp_row_max = x->mv_row_max;

        if (!saddone) {
          vp8_cal_sad(cpi, xd, x, recon_yoffset, &near_sadidx[0]);
          saddone = 1;
        }

        vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
                    x->e_mbd.mode_info_context->mbmi.ref_frame,
                    cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);

        mvp_full.as_mv.col = mvp.as_mv.col >> 3;
        mvp_full.as_mv.row = mvp.as_mv.row >> 3;

        /* Get intersection of UMV window and valid MV window to
         * reduce # of checks in diamond search.
         */
        if (x->mv_col_min < col_min) x->mv_col_min = col_min;
        if (x->mv_col_max > col_max) x->mv_col_max = col_max;
        if (x->mv_row_min < row_min) x->mv_row_min = row_min;
        if (x->mv_row_max > row_max) x->mv_row_max = row_max;

        /* adjust search range according to sr from mv prediction */
        if (sr > step_param) step_param = sr;

        /* Initial step/diamond search */
        {
          bestsme = cpi->diamond_search_sad(
              x, b, d, &mvp_full, &d->bmi.mv, step_param, sadpb, &num00,
              &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
          mode_mv[NEWMV].as_int = d->bmi.mv.as_int;

          /* Further step/diamond searches as necessary */
          further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;

          n = num00;
          num00 = 0;

          /* If there won't be more n-step search, check to see if refining
           * search is needed. */
          if (n > further_steps) do_refine = 0;

          while (n < further_steps) {
            n++;

            /* num00 > 0 means the previous search already covered this
             * step size with the center unchanged; skip it.
             */
            if (num00) {
              num00--;
            } else {
              thissme = cpi->diamond_search_sad(
                  x, b, d, &mvp_full, &d->bmi.mv, step_param + n, sadpb, &num00,
                  &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);

              /* check to see if refining search is needed. */
              if (num00 > (further_steps - n)) do_refine = 0;

              if (thissme < bestsme) {
                bestsme = thissme;
                mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
              } else {
                d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
              }
            }
          }
        }

        /* final 1-away diamond refining search */
        if (do_refine == 1) {
          int search_range;

          search_range = 8;

          thissme = cpi->refining_search_sad(
              x, b, d, &d->bmi.mv, sadpb, search_range,
              &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);

          if (thissme < bestsme) {
            bestsme = thissme;
            mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
          } else {
            d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
          }
        }

        /* Restore the MV limits clipped above for the diamond search. */
        x->mv_col_min = tmp_col_min;
        x->mv_col_max = tmp_col_max;
        x->mv_row_min = tmp_row_min;
        x->mv_row_max = tmp_row_max;

        if (bestsme < INT_MAX) {
          int dis; /* TODO: use dis in distortion calculation later. */
          unsigned int sse;
          cpi->find_fractional_mv_step(
              x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit,
              &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &dis, &sse);
        }

        mode_mv[NEWMV].as_int = d->bmi.mv.as_int;

        /* Add the new motion vector cost to our rolling cost variable */
        rd.rate2 +=
            vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, x->mvcost, 96);
      }
        /* fall through */

      case NEARESTMV:
      case NEARMV:
        /* Clip "next_nearest" so that it does not extend too far out
         * of image
         */
        vp8_clamp_mv2(&mode_mv[this_mode], xd);

        /* Do not bother proceeding if the vector (from newmv, nearest
         * or near) is 0,0 as this should then be coded using the zeromv
         * mode.
         */
        if (((this_mode == NEARMV) || (this_mode == NEARESTMV)) &&
            (mode_mv[this_mode].as_int == 0)) {
          continue;
        }
        /* fall through */

      case ZEROMV:

        /* Trap vectors that reach beyond the UMV borders
         * Note that ALL New MV, Nearest MV Near MV and Zero MV code
         * drops through to this point because of the lack of break
         * statements in the previous two cases.
         */
        if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
            ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
            ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
            ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
          continue;
        }

        vp8_set_mbmode_and_mvs(x, this_mode, &mode_mv[this_mode]);
        this_rd = evaluate_inter_mode_rd(mdcounts, &rd, &disable_skip, cpi, x);
        break;

      default: break;
    }

    this_rd =
        calculate_final_rd_costs(this_rd, &rd, &other_cost, disable_skip,
                                 uv_intra_tteob, intra_rd_penalty, cpi, x);

    /* Keep record of best intra distortion */
    if ((x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) &&
        (this_rd < best_mode.intra_rd)) {
      best_mode.intra_rd = this_rd;
      *returnintra = rd.distortion2;
    }
#if CONFIG_TEMPORAL_DENOISING
    if (cpi->oxcf.noise_sensitivity) {
      unsigned int sse;
      vp8_get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], &sse,
                                 mode_mv[this_mode]);

      if (sse < best_rd_sse) best_rd_sse = sse;

      /* Store for later use by denoiser. */
      if (this_mode == ZEROMV && sse < zero_mv_sse) {
        zero_mv_sse = sse;
        x->best_zeromv_reference_frame =
            x->e_mbd.mode_info_context->mbmi.ref_frame;
      }

      /* Store the best NEWMV in x for later use in the denoiser. */
      if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV && sse < best_sse) {
        best_sse = sse;
        vp8_get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], &best_sse,
                                   mode_mv[this_mode]);
        x->best_sse_inter_mode = NEWMV;
        x->best_sse_mv = x->e_mbd.mode_info_context->mbmi.mv;
        x->need_to_clamp_best_mvs =
            x->e_mbd.mode_info_context->mbmi.need_to_clamp_mvs;
        x->best_reference_frame = x->e_mbd.mode_info_context->mbmi.ref_frame;
      }
    }
#endif

    /* Did this mode help.. i.e. is it the new best mode */
    if (this_rd < best_mode.rd || x->skip) {
      /* Note index of best mode so far */
      best_mode_index = mode_index;
      *returnrate = rd.rate2;
      *returndistortion = rd.distortion2;
      if (this_mode <= B_PRED) {
        x->e_mbd.mode_info_context->mbmi.uv_mode = uv_intra_mode;
        /* required for left and above block mv */
        x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
      }
      update_best_mode(&best_mode, this_rd, &rd, other_cost, x);

      /* Testing this mode gave rise to an improvement in best error
       * score. Lower threshold a bit for next time
       */
      x->rd_thresh_mult[mode_index] =
          (x->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2))
              ? x->rd_thresh_mult[mode_index] - 2
              : MIN_THRESHMULT;
    }

    /* If the mode did not help improve the best error case then raise
     * the threshold for testing that mode next time around.
     */
    else {
      x->rd_thresh_mult[mode_index] += 4;

      if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
        x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
      }
    }
    x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) *
                                 x->rd_thresh_mult[mode_index];

    if (x->skip) break;
  }

  /* Reduce the activation RD thresholds for the best choice mode */
  if ((cpi->rd_baseline_thresh[best_mode_index] > 0) &&
      (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2))) {
    int best_adjustment = (x->rd_thresh_mult[best_mode_index] >> 2);

    x->rd_thresh_mult[best_mode_index] =
        (x->rd_thresh_mult[best_mode_index] >=
         (MIN_THRESHMULT + best_adjustment))
            ? x->rd_thresh_mult[best_mode_index] - best_adjustment
            : MIN_THRESHMULT;
    x->rd_threshes[best_mode_index] =
        (cpi->rd_baseline_thresh[best_mode_index] >> 7) *
        x->rd_thresh_mult[best_mode_index];
  }

#if CONFIG_TEMPORAL_DENOISING
  if (cpi->oxcf.noise_sensitivity) {
    int block_index = mb_row * cpi->common.mb_cols + mb_col;
    if (x->best_sse_inter_mode == DC_PRED) {
      /* No best MV found. */
      x->best_sse_inter_mode = best_mode.mbmode.mode;
      x->best_sse_mv = best_mode.mbmode.mv;
      x->need_to_clamp_best_mvs = best_mode.mbmode.need_to_clamp_mvs;
      x->best_reference_frame = best_mode.mbmode.ref_frame;
      best_sse = best_rd_sse;
    }
    vp8_denoiser_denoise_mb(&cpi->denoiser, x, best_sse, zero_mv_sse,
                            recon_yoffset, recon_uvoffset, &cpi->common.lf_info,
                            mb_row, mb_col, block_index, 0);

    /* Reevaluate ZEROMV after denoising. */
    if (best_mode.mbmode.ref_frame == INTRA_FRAME &&
        x->best_zeromv_reference_frame != INTRA_FRAME) {
      int this_rd = INT_MAX;
      int disable_skip = 0;
      int other_cost = 0;
      int this_ref_frame = x->best_zeromv_reference_frame;
      rd.rate2 =
          x->ref_frame_cost[this_ref_frame] + vp8_cost_mv_ref(ZEROMV, mdcounts);
      rd.distortion2 = 0;

      /* set up the proper prediction buffers for the frame */
      x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
      x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
      x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
      x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];

      x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
      x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
      x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;

      this_rd = evaluate_inter_mode_rd(mdcounts, &rd, &disable_skip, cpi, x);
      this_rd =
          calculate_final_rd_costs(this_rd, &rd, &other_cost, disable_skip,
                                   uv_intra_tteob, intra_rd_penalty, cpi, x);
      if (this_rd < best_mode.rd || x->skip) {
        *returnrate = rd.rate2;
        *returndistortion = rd.distortion2;
        update_best_mode(&best_mode, this_rd, &rd, other_cost, x);
      }
    }
  }
#endif

  /* On a source frame that is itself the alt-ref, force ZEROMV/ALTREF so the
   * MB is coded as a direct copy of the alt-ref frame.
   */
  if (cpi->is_src_frame_alt_ref &&
      (best_mode.mbmode.mode != ZEROMV ||
       best_mode.mbmode.ref_frame != ALTREF_FRAME)) {
    x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
    x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME;
    x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
    x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
    x->e_mbd.mode_info_context->mbmi.mb_skip_coeff =
        (cpi->common.mb_no_coeff_skip);
    x->e_mbd.mode_info_context->mbmi.partitioning = 0;
    return;
  }

  /* macroblock modes */
  memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mode.mbmode,
         sizeof(MB_MODE_INFO));

  if (best_mode.mbmode.mode == B_PRED) {
    for (i = 0; i < 16; ++i) {
      xd->mode_info_context->bmi[i].as_mode = best_mode.bmodes[i].as_mode;
    }
  }

  if (best_mode.mbmode.mode == SPLITMV) {
    for (i = 0; i < 16; ++i) {
      xd->mode_info_context->bmi[i].mv.as_int = best_mode.bmodes[i].mv.as_int;
    }

    memcpy(x->partition_info, &best_mode.partition, sizeof(PARTITION_INFO));

    x->e_mbd.mode_info_context->mbmi.mv.as_int =
        x->partition_info->bmi[15].mv.as_int;
  }

  /* The winner's sign bias may differ from the current one; pick the
   * matching saved ref mv before updating mv counts.
   */
  if (sign_bias !=
      cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame]) {
    best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int;
  }

  rd_update_mvcount(x, &best_ref_mv);
}
  1938. void vp8_rd_pick_intra_mode(MACROBLOCK *x, int *rate_) {
  1939. int error4x4, error16x16;
  1940. int rate4x4, rate16x16 = 0, rateuv;
  1941. int dist4x4, dist16x16, distuv;
  1942. int rate;
  1943. int rate4x4_tokenonly = 0;
  1944. int rate16x16_tokenonly = 0;
  1945. int rateuv_tokenonly = 0;
  1946. x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
  1947. rd_pick_intra_mbuv_mode(x, &rateuv, &rateuv_tokenonly, &distuv);
  1948. rate = rateuv;
  1949. error16x16 = rd_pick_intra16x16mby_mode(x, &rate16x16, &rate16x16_tokenonly,
  1950. &dist16x16);
  1951. error4x4 = rd_pick_intra4x4mby_modes(x, &rate4x4, &rate4x4_tokenonly,
  1952. &dist4x4, error16x16);
  1953. if (error4x4 < error16x16) {
  1954. x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
  1955. rate += rate4x4;
  1956. } else {
  1957. rate += rate16x16;
  1958. }
  1959. *rate_ = rate;
  1960. }