/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <limits.h>
#include <string.h>
#include "vpx_config.h"
#include "vp8_rtcd.h"
#include "vpx/vpx_integer.h"
#include "blockd.h"
#include "reconinter.h"
#if CONFIG_RUNTIME_CPU_DETECT
#include "onyxc_int.h"
#endif
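
/* Full-pixel copy helpers used when a motion vector has no sub-pixel
 * component: each simply copies rows of 16 or 8 bytes between the two
 * strided buffers.
 */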
void vp8_copy_mem16x16_c(unsigned char *src, int src_stride, unsigned char *dst,
                         int dst_stride) {
  int r;
  for (r = 0; r < 16; ++r) {
    memcpy(dst, src, 16);
    src += src_stride;
    dst += dst_stride;
  }
}

void vp8_copy_mem8x8_c(unsigned char *src, int src_stride, unsigned char *dst,
                       int dst_stride) {
  int r;
  for (r = 0; r < 8; ++r) {
    memcpy(dst, src, 8);
    src += src_stride;
    dst += dst_stride;
  }
}

void vp8_copy_mem8x4_c(unsigned char *src, int src_stride, unsigned char *dst,
                       int dst_stride) {
  int r;
  for (r = 0; r < 4; ++r) {
    memcpy(dst, src, 8);
    src += src_stride;
    dst += dst_stride;
  }
}
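
/* Motion vector components in the builders below carry three fractional
 * bits (1/8-pel units): ">> 3" gives the full-pixel offset into the
 * reference buffer and "& 7" gives the sub-pixel phase handed to the
 * sub-pixel filter. When both phases are zero the prediction reduces to
 * a plain copy.
 */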
void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre,
                                  int pre_stride, vp8_subpix_fn_t sppf) {
  int r;
  unsigned char *pred_ptr = d->predictor;
  unsigned char *ptr;
  ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride +
        (d->bmi.mv.as_mv.col >> 3);

  if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) {
    sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7,
         pred_ptr, pitch);
  } else {
    for (r = 0; r < 4; ++r) {
      pred_ptr[0] = ptr[0];
      pred_ptr[1] = ptr[1];
      pred_ptr[2] = ptr[2];
      pred_ptr[3] = ptr[3];
      pred_ptr += pitch;
      ptr += pre_stride;
    }
  }
}
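
/* The static helpers below predict progressively smaller regions of a
 * split macroblock: build_inter_predictors4b() covers an 8x8 area (four
 * 4x4 blocks sharing one MV), build_inter_predictors2b() an 8x4 area
 * (two adjacent 4x4 blocks sharing one MV), and build_inter_predictors_b()
 * a single 4x4 block written straight to the destination buffer.
 */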
static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d,
                                     unsigned char *dst, int dst_stride,
                                     unsigned char *base_pre, int pre_stride) {
  unsigned char *ptr;
  ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride +
        (d->bmi.mv.as_mv.col >> 3);

  if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) {
    x->subpixel_predict8x8(ptr, pre_stride, d->bmi.mv.as_mv.col & 7,
                           d->bmi.mv.as_mv.row & 7, dst, dst_stride);
  } else {
    vp8_copy_mem8x8(ptr, pre_stride, dst, dst_stride);
  }
}

static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d,
                                     unsigned char *dst, int dst_stride,
                                     unsigned char *base_pre, int pre_stride) {
  unsigned char *ptr;
  ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride +
        (d->bmi.mv.as_mv.col >> 3);

  if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) {
    x->subpixel_predict8x4(ptr, pre_stride, d->bmi.mv.as_mv.col & 7,
                           d->bmi.mv.as_mv.row & 7, dst, dst_stride);
  } else {
    vp8_copy_mem8x4(ptr, pre_stride, dst, dst_stride);
  }
}

static void build_inter_predictors_b(BLOCKD *d, unsigned char *dst,
                                     int dst_stride, unsigned char *base_pre,
                                     int pre_stride, vp8_subpix_fn_t sppf) {
  int r;
  unsigned char *ptr;
  ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride +
        (d->bmi.mv.as_mv.col >> 3);

  if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) {
    sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst,
         dst_stride);
  } else {
    for (r = 0; r < 4; ++r) {
      dst[0] = ptr[0];
      dst[1] = ptr[1];
      dst[2] = ptr[2];
      dst[3] = ptr[3];
      dst += dst_stride;
      ptr += pre_stride;
    }
  }
}
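
/* The 16x16 chroma path derives one U/V motion vector by halving the
 * luma MV with rounding away from zero: "1 | (v >> (sizeof(int) *
 * CHAR_BIT - 1))" evaluates to +1 for non-negative v and -1 for negative
 * v, so e.g. 5 becomes 6 / 2 = 3 and -5 becomes -6 / 2 = -3 before the
 * truncating divide. The fullpixel_mask then drops any fractional bits
 * for streams restricted to full-pixel motion.
 */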
/*encoder only*/
void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x) {
  unsigned char *uptr, *vptr;
  unsigned char *upred_ptr = &x->predictor[256];
  unsigned char *vpred_ptr = &x->predictor[320];
  int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
  int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
  int offset;
  int pre_stride = x->pre.uv_stride;

  /* calc uv motion vectors */
  mv_row += 1 | (mv_row >> (sizeof(int) * CHAR_BIT - 1));
  mv_col += 1 | (mv_col >> (sizeof(int) * CHAR_BIT - 1));
  mv_row /= 2;
  mv_col /= 2;
  mv_row &= x->fullpixel_mask;
  mv_col &= x->fullpixel_mask;

  offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
  uptr = x->pre.u_buffer + offset;
  vptr = x->pre.v_buffer + offset;

  if ((mv_row | mv_col) & 7) {
    x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr,
                           8);
    x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr,
                           8);
  } else {
    vp8_copy_mem8x8(uptr, pre_stride, upred_ptr, 8);
    vp8_copy_mem8x8(vptr, pre_stride, vpred_ptr, 8);
  }
}
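
/* For split (4x4) modes each chroma 4x4 block covers four luma 4x4
 * blocks. Its MV is the sum of those four luma MVs divided by 8 (the
 * average of four, then halved for the chroma plane), rounded to nearest
 * with ties away from zero: 4 is added for a non-negative sum and
 * 4 - 8 = -4 for a negative one before the truncating divide, e.g. a sum
 * of -12 gives (-12 - 4) / 8 = -2.
 */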
/*encoder only*/
void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x) {
  int i, j;
  int pre_stride = x->pre.uv_stride;
  unsigned char *base_pre;

  /* build uv mvs */
  for (i = 0; i < 2; ++i) {
    for (j = 0; j < 2; ++j) {
      int yoffset = i * 8 + j * 2;
      int uoffset = 16 + i * 2 + j;
      int voffset = 20 + i * 2 + j;
      int temp;

      temp = x->block[yoffset].bmi.mv.as_mv.row +
             x->block[yoffset + 1].bmi.mv.as_mv.row +
             x->block[yoffset + 4].bmi.mv.as_mv.row +
             x->block[yoffset + 5].bmi.mv.as_mv.row;
      temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);
      x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;

      temp = x->block[yoffset].bmi.mv.as_mv.col +
             x->block[yoffset + 1].bmi.mv.as_mv.col +
             x->block[yoffset + 4].bmi.mv.as_mv.col +
             x->block[yoffset + 5].bmi.mv.as_mv.col;
      temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);
      x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;

      x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int;
    }
  }

  base_pre = x->pre.u_buffer;
  for (i = 16; i < 20; i += 2) {
    BLOCKD *d0 = &x->block[i];
    BLOCKD *d1 = &x->block[i + 1];

    if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
      build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
    } else {
      vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride,
                                   x->subpixel_predict);
      vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride,
                                   x->subpixel_predict);
    }
  }

  base_pre = x->pre.v_buffer;
  for (i = 20; i < 24; i += 2) {
    BLOCKD *d0 = &x->block[i];
    BLOCKD *d1 = &x->block[i + 1];

    if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
      build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
    } else {
      vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride,
                                   x->subpixel_predict);
      vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride,
                                   x->subpixel_predict);
    }
  }
}
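
/* Luma counterpart of the 16x16 chroma builder above; unlike the _mbuv
 * helpers, which fill the fixed x->predictor buffer, this one writes the
 * 16x16 Y prediction to a caller-supplied destination and stride.
 */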
/*encoder only*/
void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x, unsigned char *dst_y,
                                         int dst_ystride) {
  unsigned char *ptr_base;
  unsigned char *ptr;
  int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
  int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
  int pre_stride = x->pre.y_stride;

  ptr_base = x->pre.y_buffer;
  ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);

  if ((mv_row | mv_col) & 7) {
    x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, dst_y,
                             dst_ystride);
  } else {
    vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
  }
}

static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd) {
  /* If the MV points so far into the UMV border that no visible pixels
   * are used for reconstruction, the subpel part of the MV can be
   * discarded and the MV limited to 16 pixels with equivalent results.
   *
   * This limit kicks in at 19 pixels for the top and left edges, for
   * the 16 pixels plus 3 taps right of the central pixel when subpel
   * filtering. The bottom and right edges use 16 pixels plus 2 pixels
   * left of the central pixel when filtering.
   */
  if (mv->col < (xd->mb_to_left_edge - (19 << 3))) {
    mv->col = xd->mb_to_left_edge - (16 << 3);
  } else if (mv->col > xd->mb_to_right_edge + (18 << 3)) {
    mv->col = xd->mb_to_right_edge + (16 << 3);
  }

  if (mv->row < (xd->mb_to_top_edge - (19 << 3))) {
    mv->row = xd->mb_to_top_edge - (16 << 3);
  } else if (mv->row > xd->mb_to_bottom_edge + (18 << 3)) {
    mv->row = xd->mb_to_bottom_edge + (16 << 3);
  }
}

/* A version of the above function for chroma block MVs.*/
static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd) {
  mv->col = (2 * mv->col < (xd->mb_to_left_edge - (19 << 3)))
                ? (xd->mb_to_left_edge - (16 << 3)) >> 1
                : mv->col;
  mv->col = (2 * mv->col > xd->mb_to_right_edge + (18 << 3))
                ? (xd->mb_to_right_edge + (16 << 3)) >> 1
                : mv->col;

  mv->row = (2 * mv->row < (xd->mb_to_top_edge - (19 << 3)))
                ? (xd->mb_to_top_edge - (16 << 3)) >> 1
                : mv->row;
  mv->row = (2 * mv->row > xd->mb_to_bottom_edge + (18 << 3))
                ? (xd->mb_to_bottom_edge + (16 << 3)) >> 1
                : mv->row;
}
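
/* Prediction for whole-macroblock (non-SPLITMV) modes. The test
 * "_16x16mv.as_int & 0x00070007" checks the three fractional bits of the
 * row and column components in a single operation. The chroma MV is
 * derived by the same halve-and-round arithmetic as above; the chroma
 * pre_stride is taken as half the luma stride, and the chroma prediction
 * is skipped when the derived MV would have required clamping (the same
 * bounds checked by clamp_uvmv_to_umv_border()).
 */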
void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x, unsigned char *dst_y,
                                        unsigned char *dst_u,
                                        unsigned char *dst_v, int dst_ystride,
                                        int dst_uvstride) {
  int offset;
  unsigned char *ptr;
  unsigned char *uptr, *vptr;
  int_mv _16x16mv;
  unsigned char *ptr_base = x->pre.y_buffer;
  int pre_stride = x->pre.y_stride;

  _16x16mv.as_int = x->mode_info_context->mbmi.mv.as_int;

  if (x->mode_info_context->mbmi.need_to_clamp_mvs) {
    clamp_mv_to_umv_border(&_16x16mv.as_mv, x);
  }

  ptr = ptr_base + (_16x16mv.as_mv.row >> 3) * pre_stride +
        (_16x16mv.as_mv.col >> 3);

  if (_16x16mv.as_int & 0x00070007) {
    x->subpixel_predict16x16(ptr, pre_stride, _16x16mv.as_mv.col & 7,
                             _16x16mv.as_mv.row & 7, dst_y, dst_ystride);
  } else {
    vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
  }

  /* calc uv motion vectors */
  _16x16mv.as_mv.row +=
      1 | (_16x16mv.as_mv.row >> (sizeof(int) * CHAR_BIT - 1));
  _16x16mv.as_mv.col +=
      1 | (_16x16mv.as_mv.col >> (sizeof(int) * CHAR_BIT - 1));
  _16x16mv.as_mv.row /= 2;
  _16x16mv.as_mv.col /= 2;
  _16x16mv.as_mv.row &= x->fullpixel_mask;
  _16x16mv.as_mv.col &= x->fullpixel_mask;

  if (2 * _16x16mv.as_mv.col < (x->mb_to_left_edge - (19 << 3)) ||
      2 * _16x16mv.as_mv.col > x->mb_to_right_edge + (18 << 3) ||
      2 * _16x16mv.as_mv.row < (x->mb_to_top_edge - (19 << 3)) ||
      2 * _16x16mv.as_mv.row > x->mb_to_bottom_edge + (18 << 3)) {
    return;
  }

  pre_stride >>= 1;
  offset = (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);
  uptr = x->pre.u_buffer + offset;
  vptr = x->pre.v_buffer + offset;

  if (_16x16mv.as_int & 0x00070007) {
    x->subpixel_predict8x8(uptr, pre_stride, _16x16mv.as_mv.col & 7,
                           _16x16mv.as_mv.row & 7, dst_u, dst_uvstride);
    x->subpixel_predict8x8(vptr, pre_stride, _16x16mv.as_mv.col & 7,
                           _16x16mv.as_mv.row & 7, dst_v, dst_uvstride);
  } else {
    vp8_copy_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
    vp8_copy_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
  }
}
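
/* SPLITMV prediction. Partitioning values below 3 (the 16x8, 8x16 and
 * 8x8 splits) give each 8x8 quadrant a single MV, so blocks 0, 2, 8 and
 * 10 -- the top-left 4x4 block of each quadrant -- can be predicted with
 * the 8x8 helper. The 4x4 split is handled per pair of blocks, merging
 * horizontally adjacent blocks that share an MV into one 8x4 call.
 */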
static void build_inter4x4_predictors_mb(MACROBLOCKD *x) {
  int i;
  unsigned char *base_dst = x->dst.y_buffer;
  unsigned char *base_pre = x->pre.y_buffer;

  if (x->mode_info_context->mbmi.partitioning < 3) {
    BLOCKD *b;
    int dst_stride = x->dst.y_stride;

    x->block[0].bmi = x->mode_info_context->bmi[0];
    x->block[2].bmi = x->mode_info_context->bmi[2];
    x->block[8].bmi = x->mode_info_context->bmi[8];
    x->block[10].bmi = x->mode_info_context->bmi[10];
    if (x->mode_info_context->mbmi.need_to_clamp_mvs) {
      clamp_mv_to_umv_border(&x->block[0].bmi.mv.as_mv, x);
      clamp_mv_to_umv_border(&x->block[2].bmi.mv.as_mv, x);
      clamp_mv_to_umv_border(&x->block[8].bmi.mv.as_mv, x);
      clamp_mv_to_umv_border(&x->block[10].bmi.mv.as_mv, x);
    }

    b = &x->block[0];
    build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre,
                             dst_stride);
    b = &x->block[2];
    build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre,
                             dst_stride);
    b = &x->block[8];
    build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre,
                             dst_stride);
    b = &x->block[10];
    build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre,
                             dst_stride);
  } else {
    for (i = 0; i < 16; i += 2) {
      BLOCKD *d0 = &x->block[i];
      BLOCKD *d1 = &x->block[i + 1];
      int dst_stride = x->dst.y_stride;

      x->block[i + 0].bmi = x->mode_info_context->bmi[i + 0];
      x->block[i + 1].bmi = x->mode_info_context->bmi[i + 1];
      if (x->mode_info_context->mbmi.need_to_clamp_mvs) {
        clamp_mv_to_umv_border(&x->block[i + 0].bmi.mv.as_mv, x);
        clamp_mv_to_umv_border(&x->block[i + 1].bmi.mv.as_mv, x);
      }

      if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
        build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride,
                                 base_pre, dst_stride);
      } else {
        build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride,
                                 base_pre, dst_stride, x->subpixel_predict);
        build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride,
                                 base_pre, dst_stride, x->subpixel_predict);
      }
    }
  }

  base_dst = x->dst.u_buffer;
  base_pre = x->pre.u_buffer;
  for (i = 16; i < 20; i += 2) {
    BLOCKD *d0 = &x->block[i];
    BLOCKD *d1 = &x->block[i + 1];
    int dst_stride = x->dst.uv_stride;

    /* Note: uv mvs already clamped in build_4x4uvmvs() */
    if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
      build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride,
                               base_pre, dst_stride);
    } else {
      build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre,
                               dst_stride, x->subpixel_predict);
      build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre,
                               dst_stride, x->subpixel_predict);
    }
  }

  base_dst = x->dst.v_buffer;
  base_pre = x->pre.v_buffer;
  for (i = 20; i < 24; i += 2) {
    BLOCKD *d0 = &x->block[i];
    BLOCKD *d1 = &x->block[i + 1];
    int dst_stride = x->dst.uv_stride;

    /* Note: uv mvs already clamped in build_4x4uvmvs() */
    if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
      build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride,
                               base_pre, dst_stride);
    } else {
      build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre,
                               dst_stride, x->subpixel_predict);
      build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre,
                               dst_stride, x->subpixel_predict);
    }
  }
}
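
/* Derives the chroma block MVs for SPLITMV from the decoded luma MVs,
 * using the same rounded-average-and-halve arithmetic as
 * vp8_build_inter4x4_predictors_mbuv() above, then clamps them against
 * the UMV border when the macroblock requires it.
 */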
static void build_4x4uvmvs(MACROBLOCKD *x) {
  int i, j;

  for (i = 0; i < 2; ++i) {
    for (j = 0; j < 2; ++j) {
      int yoffset = i * 8 + j * 2;
      int uoffset = 16 + i * 2 + j;
      int voffset = 20 + i * 2 + j;
      int temp;

      temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.row +
             x->mode_info_context->bmi[yoffset + 1].mv.as_mv.row +
             x->mode_info_context->bmi[yoffset + 4].mv.as_mv.row +
             x->mode_info_context->bmi[yoffset + 5].mv.as_mv.row;
      temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);
      x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;

      temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.col +
             x->mode_info_context->bmi[yoffset + 1].mv.as_mv.col +
             x->mode_info_context->bmi[yoffset + 4].mv.as_mv.col +
             x->mode_info_context->bmi[yoffset + 5].mv.as_mv.col;
      temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);
      x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;

      if (x->mode_info_context->mbmi.need_to_clamp_mvs) {
        clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.mv.as_mv, x);
      }

      x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int;
    }
  }
}
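
/* Top-level entry point: whole-macroblock modes take the single-MV 16x16
 * path; SPLITMV first derives the chroma block MVs and then builds the
 * per-block 4x4/8x4/8x8 predictions.
 */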
void vp8_build_inter_predictors_mb(MACROBLOCKD *xd) {
  if (xd->mode_info_context->mbmi.mode != SPLITMV) {
    vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, xd->dst.u_buffer,
                                       xd->dst.v_buffer, xd->dst.y_stride,
                                       xd->dst.uv_stride);
  } else {
    build_4x4uvmvs(xd);
    build_inter4x4_predictors_mb(xd);
  }
}