/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "./vp8_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "onyx_int.h"
#include "mcomp.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_config.h"
#include <stdio.h>
#include <limits.h>
#include <math.h>
#include "vp8/common/findnearmv.h"
#include "vp8/common/common.h"
#include "vpx_dsp/vpx_dsp_common.h"
int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight) {
  /* MV costing is based on the distribution of vectors in the previous
   * frame and as such will tend to over state the cost of vectors. In
   * addition coding a new vector can have a knock on effect on the cost
   * of subsequent vectors and the quality of prediction from NEAR and
   * NEAREST for subsequent blocks. The "Weight" parameter allows, to a
   * limited extent, for some account to be taken of these factors.
   */
  const int mv_idx_row =
      clamp((mv->as_mv.row - ref->as_mv.row) >> 1, 0, MVvals);
  const int mv_idx_col =
      clamp((mv->as_mv.col - ref->as_mv.col) >> 1, 0, MVvals);
  return ((mvcost[0][mv_idx_row] + mvcost[1][mv_idx_col]) * Weight) >> 7;
}
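
/* Added illustration (not an upstream comment): Weight acts as a 7-bit
 * fixed-point scale on the summed table costs, since the product is shifted
 * right by 7. With Weight == 128 the table costs pass through unscaled;
 * Weight == 64 would charge them at half rate. */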
static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2],
                       int error_per_bit) {
  /* Ignore mv costing if mvcost is NULL */
  if (mvcost) {
    const int mv_idx_row =
        clamp((mv->as_mv.row - ref->as_mv.row) >> 1, 0, MVvals);
    const int mv_idx_col =
        clamp((mv->as_mv.col - ref->as_mv.col) >> 1, 0, MVvals);
    return ((mvcost[0][mv_idx_row] + mvcost[1][mv_idx_col]) * error_per_bit +
            128) >>
           8;
  }
  return 0;
}

static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2],
                          int error_per_bit) {
  /* Calculate sad error cost on full pixel basis. */
  /* Ignore mv costing if mvsadcost is NULL */
  if (mvsadcost) {
    return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] +
             mvsadcost[1][(mv->as_mv.col - ref->as_mv.col)]) *
                error_per_bit +
            128) >>
           8;
  }
  return 0;
}
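
/* Added note: mv_err_cost() works at sub-pel resolution (the 1/8-pel
 * component deltas are halved before the clamped table lookup), while
 * mvsad_err_cost() indexes the mvsadcost tables directly with signed
 * full-pel deltas; those table pointers are expected to be pre-offset to
 * the table centre so negative indices are valid. Both scale by
 * error_per_bit with rounding via (+ 128) >> 8. */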
void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride) {
  int Len;
  int search_site_count = 0;

  /* Generate offsets for 4 search sites per step. */
  Len = MAX_FIRST_STEP;
  x->ss[search_site_count].mv.col = 0;
  x->ss[search_site_count].mv.row = 0;
  x->ss[search_site_count].offset = 0;
  search_site_count++;

  while (Len > 0) {
    /* Compute offsets for search sites. */
    x->ss[search_site_count].mv.col = 0;
    x->ss[search_site_count].mv.row = -Len;
    x->ss[search_site_count].offset = -Len * stride;
    search_site_count++;

    /* Compute offsets for search sites. */
    x->ss[search_site_count].mv.col = 0;
    x->ss[search_site_count].mv.row = Len;
    x->ss[search_site_count].offset = Len * stride;
    search_site_count++;

    /* Compute offsets for search sites. */
    x->ss[search_site_count].mv.col = -Len;
    x->ss[search_site_count].mv.row = 0;
    x->ss[search_site_count].offset = -Len;
    search_site_count++;

    /* Compute offsets for search sites. */
    x->ss[search_site_count].mv.col = Len;
    x->ss[search_site_count].mv.row = 0;
    x->ss[search_site_count].offset = Len;
    search_site_count++;

    /* Contract. */
    Len /= 2;
  }

  x->ss_count = search_site_count;
  x->searches_per_step = 4;
}
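
/* Added note: each pass of the loop above contributes the four cross-shaped
 * sites (0,-Len), (0,+Len), (-Len,0), (+Len,0) and then halves Len, so the
 * site list encodes a shrinking diamond pattern, e.g. Len = 16, 8, 4, 2, 1
 * if MAX_FIRST_STEP were 16. ss[0] is the stationary centre site. */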
void vp8_init3smotion_compensation(MACROBLOCK *x, int stride) {
  int Len;
  int search_site_count = 0;

  /* Generate offsets for 8 search sites per step. */
  Len = MAX_FIRST_STEP;
  x->ss[search_site_count].mv.col = 0;
  x->ss[search_site_count].mv.row = 0;
  x->ss[search_site_count].offset = 0;
  search_site_count++;

  while (Len > 0) {
    /* Compute offsets for search sites. */
    x->ss[search_site_count].mv.col = 0;
    x->ss[search_site_count].mv.row = -Len;
    x->ss[search_site_count].offset = -Len * stride;
    search_site_count++;

    /* Compute offsets for search sites. */
    x->ss[search_site_count].mv.col = 0;
    x->ss[search_site_count].mv.row = Len;
    x->ss[search_site_count].offset = Len * stride;
    search_site_count++;

    /* Compute offsets for search sites. */
    x->ss[search_site_count].mv.col = -Len;
    x->ss[search_site_count].mv.row = 0;
    x->ss[search_site_count].offset = -Len;
    search_site_count++;

    /* Compute offsets for search sites. */
    x->ss[search_site_count].mv.col = Len;
    x->ss[search_site_count].mv.row = 0;
    x->ss[search_site_count].offset = Len;
    search_site_count++;

    /* Compute offsets for search sites. */
    x->ss[search_site_count].mv.col = -Len;
    x->ss[search_site_count].mv.row = -Len;
    x->ss[search_site_count].offset = -Len * stride - Len;
    search_site_count++;

    /* Compute offsets for search sites. */
    x->ss[search_site_count].mv.col = Len;
    x->ss[search_site_count].mv.row = -Len;
    x->ss[search_site_count].offset = -Len * stride + Len;
    search_site_count++;

    /* Compute offsets for search sites. */
    x->ss[search_site_count].mv.col = -Len;
    x->ss[search_site_count].mv.row = Len;
    x->ss[search_site_count].offset = Len * stride - Len;
    search_site_count++;

    /* Compute offsets for search sites. */
    x->ss[search_site_count].mv.col = Len;
    x->ss[search_site_count].mv.row = Len;
    x->ss[search_site_count].offset = Len * stride + Len;
    search_site_count++;

    /* Contract. */
    Len /= 2;
  }

  x->ss_count = search_site_count;
  x->searches_per_step = 8;
}
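
/* Added note: identical to the 4-site version above plus the four diagonal
 * corners, giving 8 sites per step arranged in a square around the centre. */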
/*
 * To avoid the penalty for crossing a cache-line boundary on a read, preload
 * the reference area into a small aligned buffer so that reads from that
 * buffer never straddle a cache line. This reduces the CPU cycles spent
 * reading ref data in the sub-pixel filter functions.
 * TODO: Currently, since the sub-pixel search range here is -3 ~ 3, a
 * 22 row x 32 col area is copied, which is enough for a 16x16 macroblock.
 * Later, for SPLITMV, we could reduce the area.
 */
/* estimated cost of a motion vector (r,c) */
#define MVC(r, c)                                                             \
  (mvcost                                                                     \
       ? ((mvcost[0][(r)-rr] + mvcost[1][(c)-rc]) * error_per_bit + 128) >> 8 \
       : 0)

/* pointer to predictor base of a motionvector */
#define PRE(r, c) (y + (((r) >> 2) * y_stride + ((c) >> 2) - (offset)))

/* convert motion vector component to offset for svf calc */
#define SP(x) (((x)&3) << 1)

/* returns subpixel variance error function. */
#define DIST(r, c) \
  vfp->svf(PRE(r, c), y_stride, SP(c), SP(r), z, b->src_stride, &sse)

#define IFMVCV(r, c, s, e) \
  if (c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;

/* returns distortion + motion vector cost */
#define ERR(r, c) (MVC(r, c) + DIST(r, c))

/* checks if (r,c) has better score than previous best */
#define CHECK_BETTER(v, r, c)                           \
  IFMVCV(r, c,                                          \
         {                                              \
           thismse = DIST(r, c);                        \
           if ((v = (MVC(r, c) + thismse)) < besterr) { \
             besterr = v;                               \
             br = r;                                    \
             bc = c;                                    \
             *distortion = thismse;                     \
             *sse1 = sse;                               \
           }                                            \
         },                                             \
         v = UINT_MAX;)
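
/* Added note on the macro protocol: CHECK_BETTER evaluates candidate (r,c)
 * in quarter-pel units. IFMVCV first range-checks the candidate; when it is
 * inside [minr,maxr] x [minc,maxc] the distortion and MV cost are summed and
 * the best-so-far state (besterr, br, bc) is updated, otherwise the output
 * variable is forced to UINT_MAX so the candidate can never win the
 * whichdir comparison below. */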
int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
                                             int_mv *bestmv, int_mv *ref_mv,
                                             int error_per_bit,
                                             const vp8_variance_fn_ptr_t *vfp,
                                             int *mvcost[2], int *distortion,
                                             unsigned int *sse1) {
  unsigned char *z = (*(b->base_src) + b->src);

  int rr = ref_mv->as_mv.row >> 1, rc = ref_mv->as_mv.col >> 1;
  int br = bestmv->as_mv.row * 4, bc = bestmv->as_mv.col * 4;
  int tr = br, tc = bc;
  unsigned int besterr;
  unsigned int left, right, up, down, diag;
  unsigned int sse;
  unsigned int whichdir;
  unsigned int halfiters = 4;
  unsigned int quarteriters = 4;
  int thismse;

  int minc = VPXMAX(x->mv_col_min * 4,
                    (ref_mv->as_mv.col >> 1) - ((1 << mvlong_width) - 1));
  int maxc = VPXMIN(x->mv_col_max * 4,
                    (ref_mv->as_mv.col >> 1) + ((1 << mvlong_width) - 1));
  int minr = VPXMAX(x->mv_row_min * 4,
                    (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1));
  int maxr = VPXMIN(x->mv_row_max * 4,
                    (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1));

  int y_stride;
  int offset;
  int pre_stride = x->e_mbd.pre.y_stride;
  unsigned char *base_pre = x->e_mbd.pre.y_buffer;

#if ARCH_X86 || ARCH_X86_64
  MACROBLOCKD *xd = &x->e_mbd;
  unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride +
                       bestmv->as_mv.col;
  unsigned char *y;
  int buf_r1, buf_r2, buf_c1;

  /* Clamping to avoid out-of-range data access */
  buf_r1 = ((bestmv->as_mv.row - 3) < x->mv_row_min)
               ? (bestmv->as_mv.row - x->mv_row_min)
               : 3;
  buf_r2 = ((bestmv->as_mv.row + 3) > x->mv_row_max)
               ? (x->mv_row_max - bestmv->as_mv.row)
               : 3;
  buf_c1 = ((bestmv->as_mv.col - 3) < x->mv_col_min)
               ? (bestmv->as_mv.col - x->mv_col_min)
               : 3;
  y_stride = 32;

  /* Copy to intermediate buffer before searching. */
  vfp->copymem(y_0 - buf_c1 - pre_stride * buf_r1, pre_stride, xd->y_buf,
               y_stride, 16 + buf_r1 + buf_r2);
  y = xd->y_buf + y_stride * buf_r1 + buf_c1;
#else
  unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride +
                     bestmv->as_mv.col;
  y_stride = pre_stride;
#endif

  offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;

  /* central mv */
  bestmv->as_mv.row *= 8;
  bestmv->as_mv.col *= 8;

  /* calculate central point error */
  besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
  *distortion = besterr;
  besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
  /* TODO: Each subsequent iteration checks at least one point in common with
   * the last iteration (it could be two if the diagonal was selected).
   */
  while (--halfiters) {
    /* 1/2 pel */
    CHECK_BETTER(left, tr, tc - 2);
    CHECK_BETTER(right, tr, tc + 2);
    CHECK_BETTER(up, tr - 2, tc);
    CHECK_BETTER(down, tr + 2, tc);

    whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);

    switch (whichdir) {
      case 0: CHECK_BETTER(diag, tr - 2, tc - 2); break;
      case 1: CHECK_BETTER(diag, tr - 2, tc + 2); break;
      case 2: CHECK_BETTER(diag, tr + 2, tc - 2); break;
      case 3: CHECK_BETTER(diag, tr + 2, tc + 2); break;
    }

    /* no reason to check the same one again. */
    if (tr == br && tc == bc) break;

    tr = br;
    tc = bc;
  }
  /* TODO: Each subsequent iteration checks at least one point in common with
   * the last iteration (it could be two if the diagonal was selected).
   */
  /* 1/4 pel */
  while (--quarteriters) {
    CHECK_BETTER(left, tr, tc - 1);
    CHECK_BETTER(right, tr, tc + 1);
    CHECK_BETTER(up, tr - 1, tc);
    CHECK_BETTER(down, tr + 1, tc);

    whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);

    switch (whichdir) {
      case 0: CHECK_BETTER(diag, tr - 1, tc - 1); break;
      case 1: CHECK_BETTER(diag, tr - 1, tc + 1); break;
      case 2: CHECK_BETTER(diag, tr + 1, tc - 1); break;
      case 3: CHECK_BETTER(diag, tr + 1, tc + 1); break;
    }

    /* no reason to check the same one again. */
    if (tr == br && tc == bc) break;

    tr = br;
    tc = bc;
  }

  bestmv->as_mv.row = br * 2;
  bestmv->as_mv.col = bc * 2;

  if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL << 3)) ||
      (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL << 3))) {
    return INT_MAX;
  }

  return besterr;
}
#undef MVC
#undef PRE
#undef SP
#undef DIST
#undef IFMVCV
#undef ERR
#undef CHECK_BETTER
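
/* Hypothetical call site (added sketch, never compiled): this mirrors how
 * the encoder's rate-distortion code invokes the fractional-pel refinement,
 * typically through the cpi->find_fractional_mv_step function pointer. The
 * cpi and best_ref_mv names are assumptions and are not defined in this
 * file; bestmv holds the full-pel MV on entry and the refined 1/8-pel MV on
 * return. */
#if 0
{
  int distortion;
  unsigned int sse;
  int err = vp8_find_best_sub_pixel_step_iteratively(
      x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit,
      &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &distortion, &sse);
}
#endif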
int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
                                 int_mv *bestmv, int_mv *ref_mv,
                                 int error_per_bit,
                                 const vp8_variance_fn_ptr_t *vfp,
                                 int *mvcost[2], int *distortion,
                                 unsigned int *sse1) {
  int bestmse = INT_MAX;
  int_mv startmv;
  int_mv this_mv;
  unsigned char *z = (*(b->base_src) + b->src);
  int left, right, up, down, diag;
  unsigned int sse;
  int whichdir;
  int thismse;
  int y_stride;
  int pre_stride = x->e_mbd.pre.y_stride;
  unsigned char *base_pre = x->e_mbd.pre.y_buffer;

#if ARCH_X86 || ARCH_X86_64
  MACROBLOCKD *xd = &x->e_mbd;
  unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride +
                       bestmv->as_mv.col;
  unsigned char *y;

  y_stride = 32;
  /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
  vfp->copymem(y_0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18);
  y = xd->y_buf + y_stride + 1;
#else
  unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride +
                     bestmv->as_mv.col;
  y_stride = pre_stride;
#endif

  /* central mv */
  bestmv->as_mv.row *= 8;
  bestmv->as_mv.col *= 8;
  startmv = *bestmv;

  /* calculate central point error */
  bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
  *distortion = bestmse;
  bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);

  /* go left then right and check error */
  this_mv.as_mv.row = startmv.as_mv.row;
  this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
  /* "halfpix" horizontal variance */
  thismse = vfp->svf(y - 1, y_stride, 4, 0, z, b->src_stride, &sse);
  left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

  if (left < bestmse) {
    *bestmv = this_mv;
    bestmse = left;
    *distortion = thismse;
    *sse1 = sse;
  }

  this_mv.as_mv.col += 8;
  /* "halfpix" horizontal variance */
  thismse = vfp->svf(y, y_stride, 4, 0, z, b->src_stride, &sse);
  right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

  if (right < bestmse) {
    *bestmv = this_mv;
    bestmse = right;
    *distortion = thismse;
    *sse1 = sse;
  }

  /* go up then down and check error */
  this_mv.as_mv.col = startmv.as_mv.col;
  this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
  /* "halfpix" vertical variance */
  thismse = vfp->svf(y - y_stride, y_stride, 0, 4, z, b->src_stride, &sse);
  up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

  if (up < bestmse) {
    *bestmv = this_mv;
    bestmse = up;
    *distortion = thismse;
    *sse1 = sse;
  }

  this_mv.as_mv.row += 8;
  /* "halfpix" vertical variance */
  thismse = vfp->svf(y, y_stride, 0, 4, z, b->src_stride, &sse);
  down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

  if (down < bestmse) {
    *bestmv = this_mv;
    bestmse = down;
    *distortion = thismse;
    *sse1 = sse;
  }

  /* now check 1 more diagonal */
  whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
  this_mv = startmv;

  switch (whichdir) {
    case 0:
      this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
      this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
      /* "halfpix" horizontal/vertical variance */
      thismse =
          vfp->svf(y - 1 - y_stride, y_stride, 4, 4, z, b->src_stride, &sse);
      break;
    case 1:
      this_mv.as_mv.col += 4;
      this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
      /* "halfpix" horizontal/vertical variance */
      thismse = vfp->svf(y - y_stride, y_stride, 4, 4, z, b->src_stride, &sse);
      break;
    case 2:
      this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
      this_mv.as_mv.row += 4;
      /* "halfpix" horizontal/vertical variance */
      thismse = vfp->svf(y - 1, y_stride, 4, 4, z, b->src_stride, &sse);
      break;
    case 3:
    default:
      this_mv.as_mv.col += 4;
      this_mv.as_mv.row += 4;
      /* "halfpix" horizontal/vertical variance */
      thismse = vfp->svf(y, y_stride, 4, 4, z, b->src_stride, &sse);
      break;
  }

  diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

  if (diag < bestmse) {
    *bestmv = this_mv;
    bestmse = diag;
    *distortion = thismse;
    *sse1 = sse;
  }

  /* time to check quarter pels. */
  if (bestmv->as_mv.row < startmv.as_mv.row) y -= y_stride;

  if (bestmv->as_mv.col < startmv.as_mv.col) y--;

  startmv = *bestmv;

  /* go left then right and check error */
  this_mv.as_mv.row = startmv.as_mv.row;

  if (startmv.as_mv.col & 7) {
    this_mv.as_mv.col = startmv.as_mv.col - 2;
    thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7,
                       this_mv.as_mv.row & 7, z, b->src_stride, &sse);
  } else {
    this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
    thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z,
                       b->src_stride, &sse);
  }

  left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

  if (left < bestmse) {
    *bestmv = this_mv;
    bestmse = left;
    *distortion = thismse;
    *sse1 = sse;
  }

  this_mv.as_mv.col += 4;
  thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7,
                     z, b->src_stride, &sse);
  right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

  if (right < bestmse) {
    *bestmv = this_mv;
    bestmse = right;
    *distortion = thismse;
    *sse1 = sse;
  }

  /* go up then down and check error */
  this_mv.as_mv.col = startmv.as_mv.col;

  if (startmv.as_mv.row & 7) {
    this_mv.as_mv.row = startmv.as_mv.row - 2;
    thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7,
                       this_mv.as_mv.row & 7, z, b->src_stride, &sse);
  } else {
    this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
    thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z,
                       b->src_stride, &sse);
  }

  up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

  if (up < bestmse) {
    *bestmv = this_mv;
    bestmse = up;
    *distortion = thismse;
    *sse1 = sse;
  }

  this_mv.as_mv.row += 4;
  thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7,
                     z, b->src_stride, &sse);
  down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

  if (down < bestmse) {
    *bestmv = this_mv;
    bestmse = down;
    *distortion = thismse;
    *sse1 = sse;
  }

  /* now check 1 more diagonal */
  whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);

  this_mv = startmv;

  switch (whichdir) {
    case 0:
      if (startmv.as_mv.row & 7) {
        this_mv.as_mv.row -= 2;
        if (startmv.as_mv.col & 7) {
          this_mv.as_mv.col -= 2;
          thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7,
                             this_mv.as_mv.row & 7, z, b->src_stride, &sse);
        } else {
          this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
          thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z,
                             b->src_stride, &sse);
        }
      } else {
        this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
        if (startmv.as_mv.col & 7) {
          this_mv.as_mv.col -= 2;
          thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6,
                             z, b->src_stride, &sse);
        } else {
          this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
          thismse = vfp->svf(y - y_stride - 1, y_stride, 6, 6, z, b->src_stride,
                             &sse);
        }
      }
      break;
    case 1:
      this_mv.as_mv.col += 2;
      if (startmv.as_mv.row & 7) {
        this_mv.as_mv.row -= 2;
        thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7,
                           this_mv.as_mv.row & 7, z, b->src_stride, &sse);
      } else {
        this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
        thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z,
                           b->src_stride, &sse);
      }
      break;
    case 2:
      this_mv.as_mv.row += 2;
      if (startmv.as_mv.col & 7) {
        this_mv.as_mv.col -= 2;
        thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7,
                           this_mv.as_mv.row & 7, z, b->src_stride, &sse);
      } else {
        this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
        thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z,
                           b->src_stride, &sse);
      }
      break;
    case 3:
      this_mv.as_mv.col += 2;
      this_mv.as_mv.row += 2;
      thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7,
                         this_mv.as_mv.row & 7, z, b->src_stride, &sse);
      break;
  }

  diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

  if (diag < bestmse) {
    *bestmv = this_mv;
    bestmse = diag;
    *distortion = thismse;
    *sse1 = sse;
  }

  return bestmse;
}
int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
                                  int_mv *bestmv, int_mv *ref_mv,
                                  int error_per_bit,
                                  const vp8_variance_fn_ptr_t *vfp,
                                  int *mvcost[2], int *distortion,
                                  unsigned int *sse1) {
  int bestmse = INT_MAX;
  int_mv startmv;
  int_mv this_mv;
  unsigned char *z = (*(b->base_src) + b->src);
  int left, right, up, down, diag;
  unsigned int sse;
  int whichdir;
  int thismse;
  int y_stride;
  int pre_stride = x->e_mbd.pre.y_stride;
  unsigned char *base_pre = x->e_mbd.pre.y_buffer;

#if ARCH_X86 || ARCH_X86_64
  MACROBLOCKD *xd = &x->e_mbd;
  unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride +
                       bestmv->as_mv.col;
  unsigned char *y;

  y_stride = 32;
  /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
  vfp->copymem(y_0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18);
  y = xd->y_buf + y_stride + 1;
#else
  unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride +
                     bestmv->as_mv.col;
  y_stride = pre_stride;
#endif

  /* central mv */
  bestmv->as_mv.row *= 8;
  bestmv->as_mv.col *= 8;
  startmv = *bestmv;

  /* calculate central point error */
  bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
  *distortion = bestmse;
  bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);

  /* go left then right and check error */
  this_mv.as_mv.row = startmv.as_mv.row;
  this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
  /* "halfpix" horizontal variance */
  thismse = vfp->svf(y - 1, y_stride, 4, 0, z, b->src_stride, &sse);
  left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

  if (left < bestmse) {
    *bestmv = this_mv;
    bestmse = left;
    *distortion = thismse;
    *sse1 = sse;
  }

  this_mv.as_mv.col += 8;
  /* "halfpix" horizontal variance */
  thismse = vfp->svf(y, y_stride, 4, 0, z, b->src_stride, &sse);
  right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

  if (right < bestmse) {
    *bestmv = this_mv;
    bestmse = right;
    *distortion = thismse;
    *sse1 = sse;
  }

  /* go up then down and check error */
  this_mv.as_mv.col = startmv.as_mv.col;
  this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
  /* "halfpix" vertical variance */
  thismse = vfp->svf(y - y_stride, y_stride, 0, 4, z, b->src_stride, &sse);
  up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

  if (up < bestmse) {
    *bestmv = this_mv;
    bestmse = up;
    *distortion = thismse;
    *sse1 = sse;
  }

  this_mv.as_mv.row += 8;
  /* "halfpix" vertical variance */
  thismse = vfp->svf(y, y_stride, 0, 4, z, b->src_stride, &sse);
  down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

  if (down < bestmse) {
    *bestmv = this_mv;
    bestmse = down;
    *distortion = thismse;
    *sse1 = sse;
  }

  /* now check 1 more diagonal */
  whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
  this_mv = startmv;

  switch (whichdir) {
    case 0:
      this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
      this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
      /* "halfpix" horizontal/vertical variance */
      thismse =
          vfp->svf(y - 1 - y_stride, y_stride, 4, 4, z, b->src_stride, &sse);
      break;
    case 1:
      this_mv.as_mv.col += 4;
      this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
      /* "halfpix" horizontal/vertical variance */
      thismse = vfp->svf(y - y_stride, y_stride, 4, 4, z, b->src_stride, &sse);
      break;
    case 2:
      this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
      this_mv.as_mv.row += 4;
      /* "halfpix" horizontal/vertical variance */
      thismse = vfp->svf(y - 1, y_stride, 4, 4, z, b->src_stride, &sse);
      break;
    case 3:
    default:
      this_mv.as_mv.col += 4;
      this_mv.as_mv.row += 4;
      /* "halfpix" horizontal/vertical variance */
      thismse = vfp->svf(y, y_stride, 4, 4, z, b->src_stride, &sse);
      break;
  }

  diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

  if (diag < bestmse) {
    *bestmv = this_mv;
    bestmse = diag;
    *distortion = thismse;
    *sse1 = sse;
  }

  return bestmse;
}
#define CHECK_BOUNDS(range)                    \
  {                                            \
    all_in = 1;                                \
    all_in &= ((br - range) >= x->mv_row_min); \
    all_in &= ((br + range) <= x->mv_row_max); \
    all_in &= ((bc - range) >= x->mv_col_min); \
    all_in &= ((bc + range) <= x->mv_col_max); \
  }

#define CHECK_POINT                                  \
  {                                                  \
    if (this_mv.as_mv.col < x->mv_col_min) continue; \
    if (this_mv.as_mv.col > x->mv_col_max) continue; \
    if (this_mv.as_mv.row < x->mv_row_min) continue; \
    if (this_mv.as_mv.row > x->mv_row_max) continue; \
  }

#define CHECK_BETTER                                                     \
  {                                                                      \
    if (thissad < bestsad) {                                             \
      thissad +=                                                         \
          mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit); \
      if (thissad < bestsad) {                                           \
        bestsad = thissad;                                               \
        best_site = i;                                                   \
      }                                                                  \
    }                                                                    \
  }
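
/* Added note: this SAD variant of CHECK_BETTER deliberately compares the raw
 * SAD against bestsad before adding the motion-vector cost, so the
 * mvsad_err_cost() lookup is only performed for candidates that are already
 * competitive on distortion alone. */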
static const MV next_chkpts[6][3] = {
  { { -2, 0 }, { -1, -2 }, { 1, -2 } }, { { -1, -2 }, { 1, -2 }, { 2, 0 } },
  { { 1, -2 }, { 2, 0 }, { 1, 2 } },    { { 2, 0 }, { 1, 2 }, { -1, 2 } },
  { { 1, 2 }, { -1, 2 }, { -2, 0 } },   { { -1, 2 }, { -2, 0 }, { -1, -2 } }
};
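
/* Added note: when the hexagon centre moves to vertex k, three of its six
 * surrounding points were already examined in the previous iteration;
 * next_chkpts[k] lists only the three new offsets to check. The update
 * "k += 5 + best_site" followed by the >= 12 / >= 6 corrections in the loop
 * below keeps k a direction index modulo 6. */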
int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
                   int_mv *best_mv, int search_param, int sad_per_bit,
                   const vp8_variance_fn_ptr_t *vfp, int *mvsadcost[2],
                   int *mvcost[2], int_mv *center_mv) {
  MV hex[6] = {
    { -1, -2 }, { 1, -2 }, { 2, 0 }, { 1, 2 }, { -1, 2 }, { -2, 0 }
  };
  MV neighbors[4] = { { 0, -1 }, { -1, 0 }, { 1, 0 }, { 0, 1 } };
  int i, j;

  unsigned char *what = (*(b->base_src) + b->src);
  int what_stride = b->src_stride;
  int pre_stride = x->e_mbd.pre.y_stride;
  unsigned char *base_pre = x->e_mbd.pre.y_buffer;
  int in_what_stride = pre_stride;
  int br, bc;
  int_mv this_mv;
  unsigned int bestsad;
  unsigned int thissad;
  unsigned char *base_offset;
  unsigned char *this_offset;
  int k = -1;
  int all_in;
  int best_site = -1;
  int hex_range = 127;
  int dia_range = 8;

  int_mv fcenter_mv;
  fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
  fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

  (void)mvcost;

  /* adjust ref_mv to make sure it is within MV range */
  vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min,
               x->mv_row_max);
  br = ref_mv->as_mv.row;
  bc = ref_mv->as_mv.col;

  /* Work out the start point for the search */
  base_offset = (unsigned char *)(base_pre + d->offset);
  this_offset = base_offset + (br * (pre_stride)) + bc;
  this_mv.as_mv.row = br;
  this_mv.as_mv.col = bc;
  bestsad = vfp->sdf(what, what_stride, this_offset, in_what_stride) +
            mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);

#if CONFIG_MULTI_RES_ENCODING
  /* Lower search range based on prediction info */
  if (search_param >= 6)
    goto cal_neighbors;
  else if (search_param >= 5)
    hex_range = 4;
  else if (search_param >= 4)
    hex_range = 6;
  else if (search_param >= 3)
    hex_range = 15;
  else if (search_param >= 2)
    hex_range = 31;
  else if (search_param >= 1)
    hex_range = 63;

  dia_range = 8;
#else
  (void)search_param;
#endif

  /* hex search */
  CHECK_BOUNDS(2)

  if (all_in) {
    for (i = 0; i < 6; ++i) {
      this_mv.as_mv.row = br + hex[i].row;
      this_mv.as_mv.col = bc + hex[i].col;
      this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) +
                    this_mv.as_mv.col;
      thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
      CHECK_BETTER
    }
  } else {
    for (i = 0; i < 6; ++i) {
      this_mv.as_mv.row = br + hex[i].row;
      this_mv.as_mv.col = bc + hex[i].col;
      CHECK_POINT
      this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) +
                    this_mv.as_mv.col;
      thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
      CHECK_BETTER
    }
  }

  if (best_site == -1) {
    goto cal_neighbors;
  } else {
    br += hex[best_site].row;
    bc += hex[best_site].col;
    k = best_site;
  }

  for (j = 1; j < hex_range; ++j) {
    best_site = -1;
    CHECK_BOUNDS(2)

    if (all_in) {
      for (i = 0; i < 3; ++i) {
        this_mv.as_mv.row = br + next_chkpts[k][i].row;
        this_mv.as_mv.col = bc + next_chkpts[k][i].col;
        this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
                      this_mv.as_mv.col;
        thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
        CHECK_BETTER
      }
    } else {
      for (i = 0; i < 3; ++i) {
        this_mv.as_mv.row = br + next_chkpts[k][i].row;
        this_mv.as_mv.col = bc + next_chkpts[k][i].col;
        CHECK_POINT
        this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
                      this_mv.as_mv.col;
        thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
        CHECK_BETTER
      }
    }

    if (best_site == -1) {
      break;
    } else {
      br += next_chkpts[k][best_site].row;
      bc += next_chkpts[k][best_site].col;
      k += 5 + best_site;
      if (k >= 12) {
        k -= 12;
      } else if (k >= 6) {
        k -= 6;
      }
    }
  }

  /* check 4 1-away neighbors */
cal_neighbors:
  for (j = 0; j < dia_range; ++j) {
    best_site = -1;
    CHECK_BOUNDS(1)

    if (all_in) {
      for (i = 0; i < 4; ++i) {
        this_mv.as_mv.row = br + neighbors[i].row;
        this_mv.as_mv.col = bc + neighbors[i].col;
        this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
                      this_mv.as_mv.col;
        thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
        CHECK_BETTER
      }
    } else {
      for (i = 0; i < 4; ++i) {
        this_mv.as_mv.row = br + neighbors[i].row;
        this_mv.as_mv.col = bc + neighbors[i].col;
        CHECK_POINT
        this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
                      this_mv.as_mv.col;
        thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride);
        CHECK_BETTER
      }
    }

    if (best_site == -1) {
      break;
    } else {
      br += neighbors[best_site].row;
      bc += neighbors[best_site].col;
    }
  }

  best_mv->as_mv.row = br;
  best_mv->as_mv.col = bc;

  return bestsad;
}
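
/* Hypothetical caller (added sketch, never compiled): pickinter-style use of
 * the hex search for a 16x16 block. mvp_full, step_param, sadpb, cpi and
 * best_ref_mv are assumed to be set up by the caller and are not defined in
 * this file. */
#if 0
bestsme = vp8_hex_search(x, b, d, &mvp_full, &d->bmi.mv, step_param, sadpb,
                         &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost,
                         &best_ref_mv);
#endif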
#undef CHECK_BOUNDS
#undef CHECK_POINT
#undef CHECK_BETTER
int vp8_diamond_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
                             int_mv *best_mv, int search_param, int sad_per_bit,
                             int *num00, vp8_variance_fn_ptr_t *fn_ptr,
                             int *mvcost[2], int_mv *center_mv) {
  int i, j, step;

  unsigned char *what = (*(b->base_src) + b->src);
  int what_stride = b->src_stride;
  unsigned char *in_what;
  int pre_stride = x->e_mbd.pre.y_stride;
  unsigned char *base_pre = x->e_mbd.pre.y_buffer;
  int in_what_stride = pre_stride;
  unsigned char *best_address;

  int tot_steps;
  int_mv this_mv;

  unsigned int bestsad;
  unsigned int thissad;
  int best_site = 0;
  int last_site = 0;

  int ref_row;
  int ref_col;
  int this_row_offset;
  int this_col_offset;
  search_site *ss;

  unsigned char *check_here;

  int *mvsadcost[2];
  int_mv fcenter_mv;

  mvsadcost[0] = x->mvsadcost[0];
  mvsadcost[1] = x->mvsadcost[1];
  fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
  fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

  vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min,
               x->mv_row_max);
  ref_row = ref_mv->as_mv.row;
  ref_col = ref_mv->as_mv.col;
  *num00 = 0;
  best_mv->as_mv.row = ref_row;
  best_mv->as_mv.col = ref_col;

  /* Work out the start point for the search */
  in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) +
                              ref_col);
  best_address = in_what;

  /* Check the starting position */
  bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride) +
            mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
  /* search_param determines the length of the initial step and hence the
   * number of iterations: 0 = initial step (MAX_FIRST_STEP) pel,
   * 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel, etc.
   */
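  /* Added example: with MAX_FIRST_STEP == 16 and the 4-site pattern,
   * search_param == 2 would skip the 16- and 8-pel rings, so the first
   * candidates examined lie 4 pels from the start point. */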
  929. ss = &x->ss[search_param * x->searches_per_step];
  930. tot_steps = (x->ss_count / x->searches_per_step) - search_param;
  931. i = 1;
  932. for (step = 0; step < tot_steps; ++step) {
  933. for (j = 0; j < x->searches_per_step; ++j) {
  934. /* Trap illegal vectors */
  935. this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
  936. this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
  937. if ((this_col_offset > x->mv_col_min) &&
  938. (this_col_offset < x->mv_col_max) &&
  939. (this_row_offset > x->mv_row_min) &&
  940. (this_row_offset < x->mv_row_max))
  941. {
  942. check_here = ss[i].offset + best_address;
  943. thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
  944. if (thissad < bestsad) {
  945. this_mv.as_mv.row = this_row_offset;
  946. this_mv.as_mv.col = this_col_offset;
  947. thissad +=
  948. mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);
  949. if (thissad < bestsad) {
  950. bestsad = thissad;
  951. best_site = i;
  952. }
  953. }
  954. }
  955. i++;
  956. }
  957. if (best_site != last_site) {
  958. best_mv->as_mv.row += ss[best_site].mv.row;
  959. best_mv->as_mv.col += ss[best_site].mv.col;
  960. best_address += ss[best_site].offset;
  961. last_site = best_site;
  962. } else if (best_address == in_what) {
  963. (*num00)++;
  964. }
  965. }
  966. this_mv.as_mv.row = best_mv->as_mv.row << 3;
  967. this_mv.as_mv.col = best_mv->as_mv.col << 3;
  968. return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad) +
  969. mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
  970. }
#if HAVE_SSE2 || HAVE_MSA
int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
                             int_mv *best_mv, int search_param, int sad_per_bit,
                             int *num00, vp8_variance_fn_ptr_t *fn_ptr,
                             int *mvcost[2], int_mv *center_mv) {
  int i, j, step;

  unsigned char *what = (*(b->base_src) + b->src);
  int what_stride = b->src_stride;
  unsigned char *in_what;
  int pre_stride = x->e_mbd.pre.y_stride;
  unsigned char *base_pre = x->e_mbd.pre.y_buffer;
  int in_what_stride = pre_stride;
  unsigned char *best_address;

  int tot_steps;
  int_mv this_mv;

  unsigned int bestsad;
  unsigned int thissad;
  int best_site = 0;
  int last_site = 0;

  int ref_row;
  int ref_col;
  int this_row_offset;
  int this_col_offset;
  search_site *ss;

  unsigned char *check_here;

  int *mvsadcost[2];
  int_mv fcenter_mv;

  mvsadcost[0] = x->mvsadcost[0];
  mvsadcost[1] = x->mvsadcost[1];
  fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
  fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

  vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min,
               x->mv_row_max);
  ref_row = ref_mv->as_mv.row;
  ref_col = ref_mv->as_mv.col;
  *num00 = 0;
  best_mv->as_mv.row = ref_row;
  best_mv->as_mv.col = ref_col;

  /* Work out the start point for the search */
  in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) +
                              ref_col);
  best_address = in_what;

  /* Check the starting position */
  bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride) +
            mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
  /* search_param determines the length of the initial step and hence the
   * number of iterations: 0 = initial step (MAX_FIRST_STEP) pel,
   * 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel, etc.
   */
  1020. ss = &x->ss[search_param * x->searches_per_step];
  1021. tot_steps = (x->ss_count / x->searches_per_step) - search_param;
  1022. i = 1;
  1023. for (step = 0; step < tot_steps; ++step) {
  1024. int all_in = 1, t;
    /* To know whether all neighbor points are within the bounds, checking
     * 4 bounds is enough instead of checking the 4 bounds of each point.
     */
    all_in &= ((best_mv->as_mv.row + ss[i].mv.row) > x->mv_row_min);
    all_in &= ((best_mv->as_mv.row + ss[i + 1].mv.row) < x->mv_row_max);
    all_in &= ((best_mv->as_mv.col + ss[i + 2].mv.col) > x->mv_col_min);
    all_in &= ((best_mv->as_mv.col + ss[i + 3].mv.col) < x->mv_col_max);

    if (all_in) {
      unsigned int sad_array[4];

      for (j = 0; j < x->searches_per_step; j += 4) {
        const unsigned char *block_offset[4];

        for (t = 0; t < 4; ++t) {
          block_offset[t] = ss[i + t].offset + best_address;
        }

        fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride,
                       sad_array);

        for (t = 0; t < 4; t++, i++) {
          if (sad_array[t] < bestsad) {
            this_mv.as_mv.row = best_mv->as_mv.row + ss[i].mv.row;
            this_mv.as_mv.col = best_mv->as_mv.col + ss[i].mv.col;
            sad_array[t] +=
                mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);

            if (sad_array[t] < bestsad) {
              bestsad = sad_array[t];
              best_site = i;
            }
          }
        }
      }
    } else {
      for (j = 0; j < x->searches_per_step; ++j) {
        /* Trap illegal vectors */
        this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
        this_col_offset = best_mv->as_mv.col + ss[i].mv.col;

        if ((this_col_offset > x->mv_col_min) &&
            (this_col_offset < x->mv_col_max) &&
            (this_row_offset > x->mv_row_min) &&
            (this_row_offset < x->mv_row_max)) {
          check_here = ss[i].offset + best_address;
          thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride);

          if (thissad < bestsad) {
            this_mv.as_mv.row = this_row_offset;
            this_mv.as_mv.col = this_col_offset;
            thissad +=
                mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);

            if (thissad < bestsad) {
              bestsad = thissad;
              best_site = i;
            }
          }
        }
        i++;
      }
    }

    if (best_site != last_site) {
      best_mv->as_mv.row += ss[best_site].mv.row;
      best_mv->as_mv.col += ss[best_site].mv.col;
      best_address += ss[best_site].offset;
      last_site = best_site;
    } else if (best_address == in_what) {
      (*num00)++;
    }
  }

  this_mv.as_mv.row = best_mv->as_mv.row * 8;
  this_mv.as_mv.col = best_mv->as_mv.col * 8;

  return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad) +
         mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
}
#endif  // HAVE_SSE2 || HAVE_MSA
int vp8_full_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
                          int sad_per_bit, int distance,
                          vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
                          int_mv *center_mv) {
  unsigned char *what = (*(b->base_src) + b->src);
  int what_stride = b->src_stride;
  unsigned char *in_what;
  int pre_stride = x->e_mbd.pre.y_stride;
  unsigned char *base_pre = x->e_mbd.pre.y_buffer;
  int in_what_stride = pre_stride;
  int mv_stride = pre_stride;
  unsigned char *bestaddress;
  int_mv *best_mv = &d->bmi.mv;
  int_mv this_mv;
  unsigned int bestsad;
  unsigned int thissad;
  int r, c;

  unsigned char *check_here;

  int ref_row = ref_mv->as_mv.row;
  int ref_col = ref_mv->as_mv.col;

  int row_min = ref_row - distance;
  int row_max = ref_row + distance;
  int col_min = ref_col - distance;
  int col_max = ref_col + distance;

  int *mvsadcost[2];
  int_mv fcenter_mv;

  mvsadcost[0] = x->mvsadcost[0];
  mvsadcost[1] = x->mvsadcost[1];
  fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
  fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

  /* Work out the mid point for the search */
  in_what = base_pre + d->offset;
  bestaddress = in_what + (ref_row * pre_stride) + ref_col;

  best_mv->as_mv.row = ref_row;
  best_mv->as_mv.col = ref_col;

  /* Baseline value at the centre */
  bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride) +
            mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
  /* Apply further limits to prevent us from searching with vectors that
   * stretch beyond the UMV border.
   */
  if (col_min < x->mv_col_min) col_min = x->mv_col_min;
  if (col_max > x->mv_col_max) col_max = x->mv_col_max;
  if (row_min < x->mv_row_min) row_min = x->mv_row_min;
  if (row_max > x->mv_row_max) row_max = x->mv_row_max;

  for (r = row_min; r < row_max; ++r) {
    this_mv.as_mv.row = r;
    check_here = r * mv_stride + in_what + col_min;

    for (c = col_min; c < col_max; ++c) {
      thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride);

      this_mv.as_mv.col = c;
      thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);

      if (thissad < bestsad) {
        bestsad = thissad;
        best_mv->as_mv.row = r;
        best_mv->as_mv.col = c;
        bestaddress = check_here;
      }

      check_here++;
    }
  }

  this_mv.as_mv.row = best_mv->as_mv.row << 3;
  this_mv.as_mv.col = best_mv->as_mv.col << 3;

  return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, &thissad) +
         mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
}
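
/* Added note: the plain full search above issues one sdf() call per
 * candidate, roughly (2 * distance)^2 of them after clamping. The SIMD
 * variants below batch the per-row sweep through sdx3f()/sdx8f(), which
 * compute the SAD at 3 or 8 consecutive column offsets per call. */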
#if HAVE_SSSE3
int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
                          int sad_per_bit, int distance,
                          vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
                          int_mv *center_mv) {
  unsigned char *what = (*(b->base_src) + b->src);
  int what_stride = b->src_stride;
  unsigned char *in_what;
  int pre_stride = x->e_mbd.pre.y_stride;
  unsigned char *base_pre = x->e_mbd.pre.y_buffer;
  int in_what_stride = pre_stride;
  int mv_stride = pre_stride;
  unsigned char *bestaddress;
  int_mv *best_mv = &d->bmi.mv;
  int_mv this_mv;
  unsigned int bestsad;
  unsigned int thissad;
  int r, c;

  unsigned char *check_here;

  int ref_row = ref_mv->as_mv.row;
  int ref_col = ref_mv->as_mv.col;

  int row_min = ref_row - distance;
  int row_max = ref_row + distance;
  int col_min = ref_col - distance;
  int col_max = ref_col + distance;

  unsigned int sad_array[3];

  int *mvsadcost[2];
  int_mv fcenter_mv;

  mvsadcost[0] = x->mvsadcost[0];
  mvsadcost[1] = x->mvsadcost[1];
  fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
  fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

  /* Work out the mid point for the search */
  in_what = base_pre + d->offset;
  bestaddress = in_what + (ref_row * pre_stride) + ref_col;

  best_mv->as_mv.row = ref_row;
  best_mv->as_mv.col = ref_col;

  /* Baseline value at the centre */
  bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride) +
            mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
  /* Apply further limits to prevent us from searching with vectors that
   * stretch beyond the UMV border.
   */
  if (col_min < x->mv_col_min) col_min = x->mv_col_min;
  if (col_max > x->mv_col_max) col_max = x->mv_col_max;
  if (row_min < x->mv_row_min) row_min = x->mv_row_min;
  if (row_max > x->mv_row_max) row_max = x->mv_row_max;

  for (r = row_min; r < row_max; ++r) {
    this_mv.as_mv.row = r;
    check_here = r * mv_stride + in_what + col_min;
    c = col_min;

    while ((c + 2) < col_max) {
      int i;

      fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array);

      for (i = 0; i < 3; ++i) {
        thissad = sad_array[i];

        if (thissad < bestsad) {
          this_mv.as_mv.col = c;
          thissad +=
              mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);

          if (thissad < bestsad) {
            bestsad = thissad;
            best_mv->as_mv.row = r;
            best_mv->as_mv.col = c;
            bestaddress = check_here;
          }
        }

        check_here++;
        c++;
      }
    }

    while (c < col_max) {
      thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride);

      if (thissad < bestsad) {
        this_mv.as_mv.col = c;
        thissad +=
            mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);

        if (thissad < bestsad) {
          bestsad = thissad;
          best_mv->as_mv.row = r;
          best_mv->as_mv.col = c;
          bestaddress = check_here;
        }
      }

      check_here++;
      c++;
    }
  }

  this_mv.as_mv.row = best_mv->as_mv.row << 3;
  this_mv.as_mv.col = best_mv->as_mv.col << 3;

  return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, &thissad) +
         mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
}
#endif  // HAVE_SSSE3
#if HAVE_SSE4_1
int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
                          int sad_per_bit, int distance,
                          vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
                          int_mv *center_mv) {
  unsigned char *what = (*(b->base_src) + b->src);
  int what_stride = b->src_stride;
  int pre_stride = x->e_mbd.pre.y_stride;
  unsigned char *base_pre = x->e_mbd.pre.y_buffer;
  unsigned char *in_what;
  int in_what_stride = pre_stride;
  int mv_stride = pre_stride;
  unsigned char *bestaddress;
  int_mv *best_mv = &d->bmi.mv;
  int_mv this_mv;
  unsigned int bestsad;
  unsigned int thissad;
  int r, c;

  unsigned char *check_here;

  int ref_row = ref_mv->as_mv.row;
  int ref_col = ref_mv->as_mv.col;

  int row_min = ref_row - distance;
  int row_max = ref_row + distance;
  int col_min = ref_col - distance;
  int col_max = ref_col + distance;

  DECLARE_ALIGNED(16, unsigned int, sad_array8[8]);
  unsigned int sad_array[3];

  int *mvsadcost[2];
  int_mv fcenter_mv;

  mvsadcost[0] = x->mvsadcost[0];
  mvsadcost[1] = x->mvsadcost[1];
  fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
  fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

  /* Work out the mid point for the search */
  in_what = base_pre + d->offset;
  bestaddress = in_what + (ref_row * pre_stride) + ref_col;

  best_mv->as_mv.row = ref_row;
  best_mv->as_mv.col = ref_col;

  /* Baseline value at the centre */
  bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride) +
            mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
  /* Apply further limits to prevent us from searching with vectors that
   * stretch beyond the UMV border.
   */
  1299. if (col_min < x->mv_col_min) col_min = x->mv_col_min;
  1300. if (col_max > x->mv_col_max) col_max = x->mv_col_max;
  1301. if (row_min < x->mv_row_min) row_min = x->mv_row_min;
  1302. if (row_max > x->mv_row_max) row_max = x->mv_row_max;
  for (r = row_min; r < row_max; ++r) {
    this_mv.as_mv.row = r;
    check_here = r * mv_stride + in_what + col_min;
    c = col_min;

    while ((c + 7) < col_max) {
      int i;

      fn_ptr->sdx8f(what, what_stride, check_here, in_what_stride, sad_array8);

      for (i = 0; i < 8; ++i) {
        thissad = sad_array8[i];

        if (thissad < bestsad) {
          this_mv.as_mv.col = c;
          thissad +=
              mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);

          if (thissad < bestsad) {
            bestsad = thissad;
            best_mv->as_mv.row = r;
            best_mv->as_mv.col = c;
            bestaddress = check_here;
          }
        }

        check_here++;
        c++;
      }
    }
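    /* Handle the remaining columns three at a time with the 3-wide SAD. */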
    while ((c + 2) < col_max) {
      int i;

      fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array);

      for (i = 0; i < 3; ++i) {
        thissad = sad_array[i];

        if (thissad < bestsad) {
          this_mv.as_mv.col = c;
          thissad +=
              mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);

          if (thissad < bestsad) {
            bestsad = thissad;
            best_mv->as_mv.row = r;
            best_mv->as_mv.col = c;
            bestaddress = check_here;
          }
        }

        check_here++;
        c++;
      }
    }
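    /* Scalar tail: check the remaining columns of the row one at a time. */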
    while (c < col_max) {
      thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride);

      if (thissad < bestsad) {
        this_mv.as_mv.col = c;
        thissad +=
            mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);

        if (thissad < bestsad) {
          bestsad = thissad;
          best_mv->as_mv.row = r;
          best_mv->as_mv.col = c;
          bestaddress = check_here;
        }
      }

      check_here++;
      c++;
    }
  }

  this_mv.as_mv.row = best_mv->as_mv.row * 8;
  this_mv.as_mv.col = best_mv->as_mv.col * 8;

  return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, &thissad) +
         mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
}
#endif  // HAVE_SSE4_1
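
/* Iterative refinement around ref_mv: each pass evaluates the four
 * plus-shaped (+/-1 full-pel) neighbours of the current best vector and
 * recentres on the best of them, stopping early once no neighbour improves
 * on the current SAD + mv cost, or after search_range passes.
 */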
int vp8_refining_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
                              int_mv *ref_mv, int error_per_bit,
                              int search_range, vp8_variance_fn_ptr_t *fn_ptr,
                              int *mvcost[2], int_mv *center_mv) {
  MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
  int i, j;
  short this_row_offset, this_col_offset;

  int what_stride = b->src_stride;
  int pre_stride = x->e_mbd.pre.y_stride;
  unsigned char *base_pre = x->e_mbd.pre.y_buffer;
  int in_what_stride = pre_stride;
  unsigned char *what = (*(b->base_src) + b->src);
  unsigned char *best_address =
      (unsigned char *)(base_pre + d->offset +
                        (ref_mv->as_mv.row * pre_stride) + ref_mv->as_mv.col);
  unsigned char *check_here;
  int_mv this_mv;
  unsigned int bestsad;
  unsigned int thissad;

  int *mvsadcost[2];
  int_mv fcenter_mv;

  mvsadcost[0] = x->mvsadcost[0];
  mvsadcost[1] = x->mvsadcost[1];
  fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
  fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

  bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride) +
            mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
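
  /* Each pass: try the four neighbours, keep the best, recentre on it. */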
  for (i = 0; i < search_range; ++i) {
    int best_site = -1;

    for (j = 0; j < 4; ++j) {
      this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
      this_col_offset = ref_mv->as_mv.col + neighbors[j].col;

      if ((this_col_offset > x->mv_col_min) &&
          (this_col_offset < x->mv_col_max) &&
          (this_row_offset > x->mv_row_min) &&
          (this_row_offset < x->mv_row_max)) {
        check_here = (neighbors[j].row) * in_what_stride + neighbors[j].col +
                     best_address;
        thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride);

        if (thissad < bestsad) {
          this_mv.as_mv.row = this_row_offset;
          this_mv.as_mv.col = this_col_offset;
          thissad +=
              mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);

          if (thissad < bestsad) {
            bestsad = thissad;
            best_site = j;
          }
        }
      }
    }

    if (best_site == -1) {
      break;
    } else {
      ref_mv->as_mv.row += neighbors[best_site].row;
      ref_mv->as_mv.col += neighbors[best_site].col;
      best_address += (neighbors[best_site].row) * in_what_stride +
                      neighbors[best_site].col;
    }
  }
  /* Multiply rather than left-shift: the components may be negative. */
  this_mv.as_mv.row = ref_mv->as_mv.row * 8;
  this_mv.as_mv.col = ref_mv->as_mv.col * 8;

  return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad) +
         mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
}
#if HAVE_SSE2 || HAVE_MSA
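/* SIMD variant of the refining search: when the current centre is far enough
 * from the MV limits that all four neighbours are in range, their SADs are
 * computed in a single sdx4df call; otherwise it falls back to the scalar
 * per-neighbour path used by the C version above.
 */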
int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
                              int_mv *ref_mv, int error_per_bit,
                              int search_range, vp8_variance_fn_ptr_t *fn_ptr,
                              int *mvcost[2], int_mv *center_mv) {
  MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
  int i, j;
  short this_row_offset, this_col_offset;

  int what_stride = b->src_stride;
  int pre_stride = x->e_mbd.pre.y_stride;
  unsigned char *base_pre = x->e_mbd.pre.y_buffer;
  int in_what_stride = pre_stride;
  unsigned char *what = (*(b->base_src) + b->src);
  unsigned char *best_address =
      (unsigned char *)(base_pre + d->offset +
                        (ref_mv->as_mv.row * pre_stride) + ref_mv->as_mv.col);
  unsigned char *check_here;
  int_mv this_mv;
  unsigned int bestsad;
  unsigned int thissad;

  int *mvsadcost[2];
  int_mv fcenter_mv;

  mvsadcost[0] = x->mvsadcost[0];
  mvsadcost[1] = x->mvsadcost[1];
  fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
  fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

  bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride) +
            mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);

  for (i = 0; i < search_range; ++i) {
    int best_site = -1;
    int all_in = 1;

    all_in &= ((ref_mv->as_mv.row - 1) > x->mv_row_min);
    all_in &= ((ref_mv->as_mv.row + 1) < x->mv_row_max);
    all_in &= ((ref_mv->as_mv.col - 1) > x->mv_col_min);
    all_in &= ((ref_mv->as_mv.col + 1) < x->mv_col_max);
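
    /* All four neighbours in range: one 4-way SIMD SAD call covers them. */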
    if (all_in) {
      unsigned int sad_array[4];
      const unsigned char *block_offset[4];
      block_offset[0] = best_address - in_what_stride;
      block_offset[1] = best_address - 1;
      block_offset[2] = best_address + 1;
      block_offset[3] = best_address + in_what_stride;

      fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride,
                     sad_array);

      for (j = 0; j < 4; ++j) {
        if (sad_array[j] < bestsad) {
          this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row;
          this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col;
          sad_array[j] +=
              mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);

          if (sad_array[j] < bestsad) {
            bestsad = sad_array[j];
            best_site = j;
          }
        }
      }
    } else {
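      /* Near the MV border: per-neighbour bounds checks and scalar SAD. */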
      for (j = 0; j < 4; ++j) {
        this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
        this_col_offset = ref_mv->as_mv.col + neighbors[j].col;

        if ((this_col_offset > x->mv_col_min) &&
            (this_col_offset < x->mv_col_max) &&
            (this_row_offset > x->mv_row_min) &&
            (this_row_offset < x->mv_row_max)) {
          check_here = (neighbors[j].row) * in_what_stride + neighbors[j].col +
                       best_address;
          thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride);

          if (thissad < bestsad) {
            this_mv.as_mv.row = this_row_offset;
            this_mv.as_mv.col = this_col_offset;
            thissad +=
                mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);

            if (thissad < bestsad) {
              bestsad = thissad;
              best_site = j;
            }
          }
        }
      }
    }

    if (best_site == -1) {
      break;
    } else {
      ref_mv->as_mv.row += neighbors[best_site].row;
      ref_mv->as_mv.col += neighbors[best_site].col;
      best_address += (neighbors[best_site].row) * in_what_stride +
                      neighbors[best_site].col;
    }
  }

  this_mv.as_mv.row = ref_mv->as_mv.row * 8;
  this_mv.as_mv.col = ref_mv->as_mv.col * 8;

  return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad) +
         mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
}
#endif  // HAVE_SSE2 || HAVE_MSA