vp9_encodemb.c

/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "./vp9_rtcd.h"
#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"

#include "vpx_dsp/quantize.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"

#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_scan.h"

#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_tokenize.h"

struct optimize_ctx {
  ENTROPY_CONTEXT ta[MAX_MB_PLANE][16];
  ENTROPY_CONTEXT tl[MAX_MB_PLANE][16];
};

void vp9_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
  struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &x->e_mbd.plane[plane];
  const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
  const int bw = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
  const int bh = 4 * num_4x4_blocks_high_lookup[plane_bsize];

#if CONFIG_VP9_HIGHBITDEPTH
  if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    vpx_highbd_subtract_block(bh, bw, p->src_diff, bw, p->src.buf,
                              p->src.stride, pd->dst.buf, pd->dst.stride,
                              x->e_mbd.bd);
    return;
  }
#endif  // CONFIG_VP9_HIGHBITDEPTH
  vpx_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
                     pd->dst.buf, pd->dst.stride);
}

typedef struct vp9_token_state {
  int64_t error;
  int rate;
  int16_t next;
  int16_t token;
  tran_low_t qc;
  tran_low_t dqc;
  uint8_t best_index;
} vp9_token_state;

static const int plane_rd_mult[REF_TYPES][PLANE_TYPES] = {
  { 10, 6 }, { 8, 5 },
};

#define UPDATE_RD_COST()                             \
  {                                                  \
    rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0); \
    rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); \
  }

// This function is a placeholder for now but may ultimately need
// to scan previous tokens to work out the correct context.
static int trellis_get_coeff_context(const int16_t *scan, const int16_t *nb,
                                     int idx, int token,
                                     uint8_t *token_cache) {
  int bak = token_cache[scan[idx]], pt;
  token_cache[scan[idx]] = vp9_pt_energy_class[token];
  pt = get_coef_context(nb, token_cache, idx + 1);
  token_cache[scan[idx]] = bak;
  return pt;
}
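
// vp9_optimize_b() runs a two-state trellis (Viterbi) search over the
// quantized coefficients of one transform block. For each nonzero
// coefficient, state 0 keeps the value produced by the quantizer and
// state 1 evaluates lowering its magnitude by one (possibly to zero,
// which may also move the end-of-block marker). The cheaper path under
// UPDATE_RD_COST()/RDCOST() is kept and written back to qcoeff/dqcoeff.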
int vp9_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
                   int ctx) {
  MACROBLOCKD *const xd = &mb->e_mbd;
  struct macroblock_plane *const p = &mb->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const int ref = is_inter_block(xd->mi[0]);
  vp9_token_state tokens[1025][2];
  uint8_t token_cache[1024];
  const tran_low_t *const coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block);
  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  const int eob = p->eobs[block];
  const PLANE_TYPE type = get_plane_type(plane);
  const int default_eob = 16 << (tx_size << 1);
  const int shift = (tx_size == TX_32X32);
  const int16_t *const dequant_ptr = pd->dequant;
  const uint8_t *const band_translate = get_band_translate(tx_size);
  const scan_order *const so = get_scan(xd, tx_size, type, block);
  const int16_t *const scan = so->scan;
  const int16_t *const nb = so->neighbors;
  const int dq_step[2] = { dequant_ptr[0] >> shift, dequant_ptr[1] >> shift };
  int next = eob, sz = 0;
  const int64_t rdmult = (mb->rdmult * plane_rd_mult[ref][type]) >> 1;
  const int64_t rddiv = mb->rddiv;
  int64_t rd_cost0, rd_cost1;
  int rate0, rate1;
  int64_t error0, error1;
  int16_t t0, t1;
  EXTRABIT e0;
  int best, band, pt, i, final_eob;
#if CONFIG_VP9_HIGHBITDEPTH
  const int *cat6_high_cost = vp9_get_high_cost_table(xd->bd);
#else
  const int *cat6_high_cost = vp9_get_high_cost_table(8);
#endif

  assert((!type && !plane) || (type && plane));
  assert(eob <= default_eob);

  /* Now set up a Viterbi trellis to evaluate alternative roundings. */
  /* Initialize the sentinel node of the trellis. */
  tokens[eob][0].rate = 0;
  tokens[eob][0].error = 0;
  tokens[eob][0].next = default_eob;
  tokens[eob][0].token = EOB_TOKEN;
  tokens[eob][0].qc = 0;
  tokens[eob][1] = tokens[eob][0];

  for (i = 0; i < eob; i++)
    token_cache[scan[i]] = vp9_pt_energy_class[vp9_get_token(qcoeff[scan[i]])];

  for (i = eob; i-- > 0;) {
    int base_bits, d2, dx;
    const int rc = scan[i];
    int x = qcoeff[rc];
    /* Only add a trellis state for non-zero coefficients. */
    if (x) {
      int shortcut = 0;
      error0 = tokens[next][0].error;
      error1 = tokens[next][1].error;
      /* Evaluate the first possibility for this state. */
      rate0 = tokens[next][0].rate;
      rate1 = tokens[next][1].rate;
      vp9_get_token_extra(x, &t0, &e0);
      /* Consider both possible successor states. */
      if (next < default_eob) {
        band = band_translate[i + 1];
        pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
        rate0 += mb->token_costs[tx_size][type][ref][band][0][pt]
                                [tokens[next][0].token];
        rate1 += mb->token_costs[tx_size][type][ref][band][0][pt]
                                [tokens[next][1].token];
      }
      UPDATE_RD_COST();
      /* And pick the best. */
      best = rd_cost1 < rd_cost0;
      base_bits = vp9_get_cost(t0, e0, cat6_high_cost);
      dx = (dqcoeff[rc] - coeff[rc]) * (1 << shift);
#if CONFIG_VP9_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        dx >>= xd->bd - 8;
      }
#endif  // CONFIG_VP9_HIGHBITDEPTH
      d2 = dx * dx;
      tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
      tokens[i][0].error = d2 + (best ? error1 : error0);
      tokens[i][0].next = next;
      tokens[i][0].token = t0;
      tokens[i][0].qc = x;
      tokens[i][0].dqc = dqcoeff[rc];
      tokens[i][0].best_index = best;

      /* Evaluate the second possibility for this state. */
      rate0 = tokens[next][0].rate;
      rate1 = tokens[next][1].rate;
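      // Consider lowering the magnitude by one only when the current
      // dequantized value overshoots the original coefficient, but by less
      // than one full quantization step.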
      if ((abs(x) * dequant_ptr[rc != 0] > (abs(coeff[rc]) << shift)) &&
          (abs(x) * dequant_ptr[rc != 0] <
           (abs(coeff[rc]) << shift) + dequant_ptr[rc != 0]))
        shortcut = 1;
      else
        shortcut = 0;

      if (shortcut) {
        sz = -(x < 0);
        x -= 2 * sz + 1;
      } else {
        tokens[i][1] = tokens[i][0];
        next = i;
        continue;
      }

      /* Consider both possible successor states. */
      if (!x) {
        /* If we reduced this coefficient to zero, check to see if
         * we need to move the EOB back here.
         */
        t0 = tokens[next][0].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
        t1 = tokens[next][1].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
        e0 = 0;
      } else {
        vp9_get_token_extra(x, &t0, &e0);
        t1 = t0;
      }
      if (next < default_eob) {
        band = band_translate[i + 1];
        if (t0 != EOB_TOKEN) {
          pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
          rate0 += mb->token_costs[tx_size][type][ref][band][!x][pt]
                                  [tokens[next][0].token];
        }
        if (t1 != EOB_TOKEN) {
          pt = trellis_get_coeff_context(scan, nb, i, t1, token_cache);
          rate1 += mb->token_costs[tx_size][type][ref][band][!x][pt]
                                  [tokens[next][1].token];
        }
      }

      UPDATE_RD_COST();
      /* And pick the best. */
      best = rd_cost1 < rd_cost0;
      base_bits = vp9_get_cost(t0, e0, cat6_high_cost);

      if (shortcut) {
#if CONFIG_VP9_HIGHBITDEPTH
        if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
          dx -= ((dequant_ptr[rc != 0] >> (xd->bd - 8)) + sz) ^ sz;
        } else {
          dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
        }
#else
        dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
#endif  // CONFIG_VP9_HIGHBITDEPTH
        d2 = dx * dx;
      }
      tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
      tokens[i][1].error = d2 + (best ? error1 : error0);
      tokens[i][1].next = next;
      tokens[i][1].token = best ? t1 : t0;
      tokens[i][1].qc = x;

      if (x) {
        tran_low_t offset = dq_step[rc != 0];
        // The 32x32 transform coefficient uses half quantization step size.
        // Account for the rounding difference in the dequantized coefficient
        // value when the quantization index is dropped from an even number
        // to an odd number.
        if (shift & x) offset += (dequant_ptr[rc != 0] & 0x01);

        if (sz == 0)
          tokens[i][1].dqc = dqcoeff[rc] - offset;
        else
          tokens[i][1].dqc = dqcoeff[rc] + offset;
      } else {
        tokens[i][1].dqc = 0;
      }

      tokens[i][1].best_index = best;
      /* Finally, make this the new head of the trellis. */
      next = i;
    } else {
      /* There's no choice to make for a zero coefficient, so we don't
       * add a new trellis node, but we do need to update the costs.
       */
      band = band_translate[i + 1];
      pt = get_coef_context(nb, token_cache, i + 1);
      t0 = tokens[next][0].token;
      t1 = tokens[next][1].token;
      /* Update the cost of each path if we're past the EOB token. */
      if (t0 != EOB_TOKEN) {
        tokens[next][0].rate +=
            mb->token_costs[tx_size][type][ref][band][1][pt][t0];
        tokens[next][0].token = ZERO_TOKEN;
      }
      if (t1 != EOB_TOKEN) {
        tokens[next][1].rate +=
            mb->token_costs[tx_size][type][ref][band][1][pt][t1];
        tokens[next][1].token = ZERO_TOKEN;
      }
      tokens[i][0].best_index = tokens[i][1].best_index = 0;
      /* Don't update next, because we didn't add a new node. */
    }
  }

  /* Now pick the best path through the whole trellis. */
  band = band_translate[i + 1];
  rate0 = tokens[next][0].rate;
  rate1 = tokens[next][1].rate;
  error0 = tokens[next][0].error;
  error1 = tokens[next][1].error;
  t0 = tokens[next][0].token;
  t1 = tokens[next][1].token;
  rate0 += mb->token_costs[tx_size][type][ref][band][0][ctx][t0];
  rate1 += mb->token_costs[tx_size][type][ref][band][0][ctx][t1];
  UPDATE_RD_COST();
  best = rd_cost1 < rd_cost0;
  final_eob = -1;

  for (i = next; i < eob; i = next) {
    const int x = tokens[i][best].qc;
    const int rc = scan[i];
    if (x) final_eob = i;
    qcoeff[rc] = x;
    dqcoeff[rc] = tokens[i][best].dqc;
    next = tokens[i][best].next;
    best = tokens[i][best].best_index;
  }
  final_eob++;

  mb->plane[plane].eobs[block] = final_eob;
  return final_eob;
}
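
// Forward 32x32 transform wrappers. The _rd variants, selected by
// x->use_lp32x32fdct ("low precision" fdct), trade some transform
// precision for speed during rate-distortion search.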
static INLINE void fdct32x32(int rd_transform, const int16_t *src,
                             tran_low_t *dst, int src_stride) {
  if (rd_transform)
    vpx_fdct32x32_rd(src, dst, src_stride);
  else
    vpx_fdct32x32(src, dst, src_stride);
}

#if CONFIG_VP9_HIGHBITDEPTH
static INLINE void highbd_fdct32x32(int rd_transform, const int16_t *src,
                                    tran_low_t *dst, int src_stride) {
  if (rd_transform)
    vpx_highbd_fdct32x32_rd(src, dst, src_stride);
  else
    vpx_highbd_fdct32x32(src, dst, src_stride);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH
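
// Forward transform and "fast path" quantization for one transform block,
// using the round_fp/quant_fp tables. This is the cheaper quantizer used
// when x->quant_fp is enabled (the real-time encoding path).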
void vp9_xform_quant_fp(MACROBLOCK *x, int plane, int block, int row, int col,
                        BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const scan_order *const scan_order = &vp9_default_scan_orders[tx_size];
  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  uint16_t *const eob = &p->eobs[block];
  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
  const int16_t *src_diff;
  src_diff = &p->src_diff[4 * (row * diff_stride + col)];

#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    switch (tx_size) {
      case TX_32X32:
        highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
        vp9_highbd_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin,
                                     p->round_fp, p->quant_fp, p->quant_shift,
                                     qcoeff, dqcoeff, pd->dequant, eob,
                                     scan_order->scan, scan_order->iscan);
        break;
      case TX_16X16:
        vpx_highbd_fdct16x16(src_diff, coeff, diff_stride);
        vp9_highbd_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
                               p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
                               pd->dequant, eob, scan_order->scan,
                               scan_order->iscan);
        break;
      case TX_8X8:
        vpx_highbd_fdct8x8(src_diff, coeff, diff_stride);
        vp9_highbd_quantize_fp(coeff, 64, x->skip_block, p->zbin, p->round_fp,
                               p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
                               pd->dequant, eob, scan_order->scan,
                               scan_order->iscan);
        break;
      case TX_4X4:
        x->fwd_txm4x4(src_diff, coeff, diff_stride);
        vp9_highbd_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
                               p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
                               pd->dequant, eob, scan_order->scan,
                               scan_order->iscan);
        break;
      default: assert(0);
    }
    return;
  }
#endif  // CONFIG_VP9_HIGHBITDEPTH

  switch (tx_size) {
    case TX_32X32:
      fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
      vp9_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin, p->round_fp,
                            p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
                            pd->dequant, eob, scan_order->scan,
                            scan_order->iscan);
      break;
    case TX_16X16:
      vpx_fdct16x16(src_diff, coeff, diff_stride);
      vp9_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
                      p->quant_fp, p->quant_shift, qcoeff, dqcoeff, pd->dequant,
                      eob, scan_order->scan, scan_order->iscan);
      break;
    case TX_8X8:
      vp9_fdct8x8_quant(src_diff, diff_stride, coeff, 64, x->skip_block,
                        p->zbin, p->round_fp, p->quant_fp, p->quant_shift,
                        qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
                        scan_order->iscan);
      break;
    case TX_4X4:
      x->fwd_txm4x4(src_diff, coeff, diff_stride);
      vp9_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
                      p->quant_fp, p->quant_shift, qcoeff, dqcoeff, pd->dequant,
                      eob, scan_order->scan, scan_order->iscan);
      break;
    default: assert(0); break;
  }
}
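
// DC-only transform and quantization: only the DC coefficient of the block
// is computed (via the *_1 forward transforms) and quantized. Used as a
// cheap approximation when the skip_txfm heuristics decide the AC
// coefficients can be dropped.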
void vp9_xform_quant_dc(MACROBLOCK *x, int plane, int block, int row, int col,
                        BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  uint16_t *const eob = &p->eobs[block];
  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
  const int16_t *src_diff;
  src_diff = &p->src_diff[4 * (row * diff_stride + col)];

#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    switch (tx_size) {
      case TX_32X32:
        vpx_highbd_fdct32x32_1(src_diff, coeff, diff_stride);
        vpx_highbd_quantize_dc_32x32(coeff, x->skip_block, p->round,
                                     p->quant_fp[0], qcoeff, dqcoeff,
                                     pd->dequant[0], eob);
        break;
      case TX_16X16:
        vpx_highbd_fdct16x16_1(src_diff, coeff, diff_stride);
        vpx_highbd_quantize_dc(coeff, 256, x->skip_block, p->round,
                               p->quant_fp[0], qcoeff, dqcoeff, pd->dequant[0],
                               eob);
        break;
      case TX_8X8:
        vpx_highbd_fdct8x8_1(src_diff, coeff, diff_stride);
        vpx_highbd_quantize_dc(coeff, 64, x->skip_block, p->round,
                               p->quant_fp[0], qcoeff, dqcoeff, pd->dequant[0],
                               eob);
        break;
      case TX_4X4:
        x->fwd_txm4x4(src_diff, coeff, diff_stride);
        vpx_highbd_quantize_dc(coeff, 16, x->skip_block, p->round,
                               p->quant_fp[0], qcoeff, dqcoeff, pd->dequant[0],
                               eob);
        break;
      default: assert(0);
    }
    return;
  }
#endif  // CONFIG_VP9_HIGHBITDEPTH

  switch (tx_size) {
    case TX_32X32:
      vpx_fdct32x32_1(src_diff, coeff, diff_stride);
      vpx_quantize_dc_32x32(coeff, x->skip_block, p->round, p->quant_fp[0],
                            qcoeff, dqcoeff, pd->dequant[0], eob);
      break;
    case TX_16X16:
      vpx_fdct16x16_1(src_diff, coeff, diff_stride);
      vpx_quantize_dc(coeff, 256, x->skip_block, p->round, p->quant_fp[0],
                      qcoeff, dqcoeff, pd->dequant[0], eob);
      break;
    case TX_8X8:
      vpx_fdct8x8_1(src_diff, coeff, diff_stride);
      vpx_quantize_dc(coeff, 64, x->skip_block, p->round, p->quant_fp[0],
                      qcoeff, dqcoeff, pd->dequant[0], eob);
      break;
    case TX_4X4:
      x->fwd_txm4x4(src_diff, coeff, diff_stride);
      vpx_quantize_dc(coeff, 16, x->skip_block, p->round, p->quant_fp[0],
                      qcoeff, dqcoeff, pd->dequant[0], eob);
      break;
    default: assert(0); break;
  }
}
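
// Full forward transform followed by regular (B) quantization for one
// transform block. This is the default path used by the RD-based modes.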
void vp9_xform_quant(MACROBLOCK *x, int plane, int block, int row, int col,
                     BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const scan_order *const scan_order = &vp9_default_scan_orders[tx_size];
  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  uint16_t *const eob = &p->eobs[block];
  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
  const int16_t *src_diff;
  src_diff = &p->src_diff[4 * (row * diff_stride + col)];

#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    switch (tx_size) {
      case TX_32X32:
        highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
        vpx_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin,
                                    p->round, p->quant, p->quant_shift, qcoeff,
                                    dqcoeff, pd->dequant, eob, scan_order->scan,
                                    scan_order->iscan);
        break;
      case TX_16X16:
        vpx_highbd_fdct16x16(src_diff, coeff, diff_stride);
        vpx_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
                              p->quant, p->quant_shift, qcoeff, dqcoeff,
                              pd->dequant, eob, scan_order->scan,
                              scan_order->iscan);
        break;
      case TX_8X8:
        vpx_highbd_fdct8x8(src_diff, coeff, diff_stride);
        vpx_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
                              p->quant, p->quant_shift, qcoeff, dqcoeff,
                              pd->dequant, eob, scan_order->scan,
                              scan_order->iscan);
        break;
      case TX_4X4:
        x->fwd_txm4x4(src_diff, coeff, diff_stride);
        vpx_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
                              p->quant, p->quant_shift, qcoeff, dqcoeff,
                              pd->dequant, eob, scan_order->scan,
                              scan_order->iscan);
        break;
      default: assert(0);
    }
    return;
  }
#endif  // CONFIG_VP9_HIGHBITDEPTH

  switch (tx_size) {
    case TX_32X32:
      fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
      vpx_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,
                           p->quant, p->quant_shift, qcoeff, dqcoeff,
                           pd->dequant, eob, scan_order->scan,
                           scan_order->iscan);
      break;
    case TX_16X16:
      vpx_fdct16x16(src_diff, coeff, diff_stride);
      vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, p->quant,
                     p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
                     scan_order->scan, scan_order->iscan);
      break;
    case TX_8X8:
      vpx_fdct8x8(src_diff, coeff, diff_stride);
      vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant,
                     p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
                     scan_order->scan, scan_order->iscan);
      break;
    case TX_4X4:
      x->fwd_txm4x4(src_diff, coeff, diff_stride);
      vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,
                     p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
                     scan_order->scan, scan_order->iscan);
      break;
    default: assert(0); break;
  }
}
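
// Encode one transform block: depending on the skip_txfm heuristics, run
// the full transform/quantization, the DC-only approximation, or skip the
// transform entirely; optionally run the trellis coefficient optimization;
// then inverse-transform the dequantized coefficients onto the
// reconstruction buffer.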
static void encode_block(int plane, int block, int row, int col,
                         BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
  struct encode_b_args *const args = arg;
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = &x->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  uint8_t *dst;
  ENTROPY_CONTEXT *a, *l;
  dst = &pd->dst.buf[4 * row * pd->dst.stride + 4 * col];
  a = &args->ta[col];
  l = &args->tl[row];

  // TODO(jingning): per transformed block zero forcing only enabled for
  // luma component. will integrate chroma components as well.
  if (x->zcoeff_blk[tx_size][block] && plane == 0) {
    p->eobs[block] = 0;
    *a = *l = 0;
    return;
  }

  if (!x->skip_recode) {
    if (x->quant_fp) {
      // Encoding process for rtc mode
      if (x->skip_txfm[0] == SKIP_TXFM_AC_DC && plane == 0) {
        // skip forward transform
        p->eobs[block] = 0;
        *a = *l = 0;
        return;
      } else {
        vp9_xform_quant_fp(x, plane, block, row, col, plane_bsize, tx_size);
      }
    } else {
      if (max_txsize_lookup[plane_bsize] == tx_size) {
        int txfm_blk_index = (plane << 2) + (block >> (tx_size << 1));
        if (x->skip_txfm[txfm_blk_index] == SKIP_TXFM_NONE) {
          // full forward transform and quantization
          vp9_xform_quant(x, plane, block, row, col, plane_bsize, tx_size);
        } else if (x->skip_txfm[txfm_blk_index] == SKIP_TXFM_AC_ONLY) {
          // fast path forward transform and quantization
          vp9_xform_quant_dc(x, plane, block, row, col, plane_bsize, tx_size);
        } else {
          // skip forward transform
          p->eobs[block] = 0;
          *a = *l = 0;
          return;
        }
      } else {
        vp9_xform_quant(x, plane, block, row, col, plane_bsize, tx_size);
      }
    }
  }

  if (x->optimize && (!x->skip_recode || !x->skip_optimize)) {
    const int ctx = combine_entropy_contexts(*a, *l);
    *a = *l = vp9_optimize_b(x, plane, block, tx_size, ctx) > 0;
  } else {
    *a = *l = p->eobs[block] > 0;
  }

  if (p->eobs[block]) *(args->skip) = 0;

  if (x->skip_encode || p->eobs[block] == 0) return;
#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    switch (tx_size) {
      case TX_32X32:
        vp9_highbd_idct32x32_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
                                 xd->bd);
        break;
      case TX_16X16:
        vp9_highbd_idct16x16_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
                                 xd->bd);
        break;
      case TX_8X8:
        vp9_highbd_idct8x8_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
                               xd->bd);
        break;
      case TX_4X4:
        // this is like vp9_short_idct4x4 but has a special case around eob<=1
        // which is significant (not just an optimization) for the lossless
        // case.
        x->highbd_itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
                           xd->bd);
        break;
      default: assert(0 && "Invalid transform size");
    }
    return;
  }
#endif  // CONFIG_VP9_HIGHBITDEPTH

  switch (tx_size) {
    case TX_32X32:
      vp9_idct32x32_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
      break;
    case TX_16X16:
      vp9_idct16x16_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
      break;
    case TX_8X8:
      vp9_idct8x8_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
      break;
    case TX_4X4:
      // this is like vp9_short_idct4x4 but has a special case around eob<=1
      // which is significant (not just an optimization) for the lossless
      // case.
      x->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
      break;
    default: assert(0 && "Invalid transform size"); break;
  }
}
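
// Simplified per-block encode used by the first pass: full transform and
// quantization followed by an immediate inverse transform into the
// reconstruction buffer, with no trellis optimization or skip logic.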
static void encode_block_pass1(int plane, int block, int row, int col,
                               BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
                               void *arg) {
  MACROBLOCK *const x = (MACROBLOCK *)arg;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = &x->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  uint8_t *dst;
  dst = &pd->dst.buf[4 * row * pd->dst.stride + 4 * col];

  vp9_xform_quant(x, plane, block, row, col, plane_bsize, tx_size);

  if (p->eobs[block] > 0) {
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      x->highbd_itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block], xd->bd);
      return;
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
    x->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
  }
}

void vp9_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
  vp9_subtract_plane(x, bsize, 0);
  vp9_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
                                         encode_block_pass1, x);
}

void vp9_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
  MACROBLOCKD *const xd = &x->e_mbd;
  struct optimize_ctx ctx;
  MODE_INFO *mi = xd->mi[0];
  struct encode_b_args arg = { x, 1, NULL, NULL, &mi->skip };
  int plane;

  mi->skip = 1;

  if (x->skip) return;

  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
    if (!x->skip_recode) vp9_subtract_plane(x, bsize, plane);

    if (x->optimize && (!x->skip_recode || !x->skip_optimize)) {
      const struct macroblockd_plane *const pd = &xd->plane[plane];
      const TX_SIZE tx_size = plane ? get_uv_tx_size(mi, pd) : mi->tx_size;
      vp9_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane],
                               ctx.tl[plane]);
      arg.enable_coeff_opt = 1;
    } else {
      arg.enable_coeff_opt = 0;
    }

    arg.ta = ctx.ta[plane];
    arg.tl = ctx.tl[plane];

    vp9_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
                                           &arg);
  }
}
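
// Encode one intra transform block. Intra prediction depends on previously
// reconstructed neighboring pixels, so each block is predicted, subtracted,
// transformed, quantized and reconstructed in place before the next block
// in the plane is processed.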
void vp9_encode_block_intra(int plane, int block, int row, int col,
                            BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
                            void *arg) {
  struct encode_b_args *const args = arg;
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *mi = xd->mi[0];
  struct macroblock_plane *const p = &x->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
  tran_low_t *coeff = BLOCK_OFFSET(p->coeff, block);
  tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  tran_low_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  const scan_order *scan_order;
  TX_TYPE tx_type = DCT_DCT;
  PREDICTION_MODE mode;
  const int bwl = b_width_log2_lookup[plane_bsize];
  const int diff_stride = 4 * (1 << bwl);
  uint8_t *src, *dst;
  int16_t *src_diff;
  uint16_t *eob = &p->eobs[block];
  const int src_stride = p->src.stride;
  const int dst_stride = pd->dst.stride;
  ENTROPY_CONTEXT *a = NULL;
  ENTROPY_CONTEXT *l = NULL;
  int entropy_ctx = 0;
  dst = &pd->dst.buf[4 * (row * dst_stride + col)];
  src = &p->src.buf[4 * (row * src_stride + col)];
  src_diff = &p->src_diff[4 * (row * diff_stride + col)];

  if (args->enable_coeff_opt) {
    a = &args->ta[col];
    l = &args->tl[row];
    entropy_ctx = combine_entropy_contexts(*a, *l);
  }

  if (tx_size == TX_4X4) {
    tx_type = get_tx_type_4x4(get_plane_type(plane), xd, block);
    scan_order = &vp9_scan_orders[TX_4X4][tx_type];
    mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mi->uv_mode;
  } else {
    mode = plane == 0 ? mi->mode : mi->uv_mode;
    if (tx_size == TX_32X32) {
      scan_order = &vp9_default_scan_orders[TX_32X32];
    } else {
      tx_type = get_tx_type(get_plane_type(plane), xd);
      scan_order = &vp9_scan_orders[tx_size][tx_type];
    }
  }

  vp9_predict_intra_block(xd, bwl, tx_size, mode, x->skip_encode ? src : dst,
                          x->skip_encode ? src_stride : dst_stride, dst,
                          dst_stride, col, row, plane);

#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    switch (tx_size) {
      case TX_32X32:
        if (!x->skip_recode) {
          vpx_highbd_subtract_block(32, 32, src_diff, diff_stride, src,
                                    src_stride, dst, dst_stride, xd->bd);
          highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
          vpx_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin,
                                      p->round, p->quant, p->quant_shift,
                                      qcoeff, dqcoeff, pd->dequant, eob,
                                      scan_order->scan, scan_order->iscan);
        }
        if (!x->skip_encode && *eob) {
          vp9_highbd_idct32x32_add(dqcoeff, dst, dst_stride, *eob, xd->bd);
        }
        break;
      case TX_16X16:
        if (!x->skip_recode) {
          vpx_highbd_subtract_block(16, 16, src_diff, diff_stride, src,
                                    src_stride, dst, dst_stride, xd->bd);
          if (tx_type == DCT_DCT)
            vpx_highbd_fdct16x16(src_diff, coeff, diff_stride);
          else
            vp9_highbd_fht16x16(src_diff, coeff, diff_stride, tx_type);
          vpx_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
                                p->quant, p->quant_shift, qcoeff, dqcoeff,
                                pd->dequant, eob, scan_order->scan,
                                scan_order->iscan);
        }
        if (!x->skip_encode && *eob) {
          vp9_highbd_iht16x16_add(tx_type, dqcoeff, dst, dst_stride, *eob,
                                  xd->bd);
        }
        break;
      case TX_8X8:
        if (!x->skip_recode) {
          vpx_highbd_subtract_block(8, 8, src_diff, diff_stride, src,
                                    src_stride, dst, dst_stride, xd->bd);
          if (tx_type == DCT_DCT)
            vpx_highbd_fdct8x8(src_diff, coeff, diff_stride);
          else
            vp9_highbd_fht8x8(src_diff, coeff, diff_stride, tx_type);
          vpx_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
                                p->quant, p->quant_shift, qcoeff, dqcoeff,
                                pd->dequant, eob, scan_order->scan,
                                scan_order->iscan);
        }
        if (!x->skip_encode && *eob) {
          vp9_highbd_iht8x8_add(tx_type, dqcoeff, dst, dst_stride, *eob,
                                xd->bd);
        }
        break;
      case TX_4X4:
        if (!x->skip_recode) {
          vpx_highbd_subtract_block(4, 4, src_diff, diff_stride, src,
                                    src_stride, dst, dst_stride, xd->bd);
          if (tx_type != DCT_DCT)
            vp9_highbd_fht4x4(src_diff, coeff, diff_stride, tx_type);
          else
            x->fwd_txm4x4(src_diff, coeff, diff_stride);
          vpx_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
                                p->quant, p->quant_shift, qcoeff, dqcoeff,
                                pd->dequant, eob, scan_order->scan,
                                scan_order->iscan);
        }
        if (!x->skip_encode && *eob) {
          if (tx_type == DCT_DCT) {
            // this is like vp9_short_idct4x4 but has a special case around
            // eob<=1 which is significant (not just an optimization) for the
            // lossless case.
            x->highbd_itxm_add(dqcoeff, dst, dst_stride, *eob, xd->bd);
          } else {
            vp9_highbd_iht4x4_16_add(dqcoeff, dst, dst_stride, tx_type, xd->bd);
          }
        }
        break;
      default: assert(0); return;
    }
    if (*eob) *(args->skip) = 0;
    return;
  }
#endif  // CONFIG_VP9_HIGHBITDEPTH

  switch (tx_size) {
    case TX_32X32:
      if (!x->skip_recode) {
        vpx_subtract_block(32, 32, src_diff, diff_stride, src, src_stride, dst,
                           dst_stride);
        fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
        vpx_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,
                             p->quant, p->quant_shift, qcoeff, dqcoeff,
                             pd->dequant, eob, scan_order->scan,
                             scan_order->iscan);
      }
      if (args->enable_coeff_opt && !x->skip_recode) {
        *a = *l = vp9_optimize_b(x, plane, block, tx_size, entropy_ctx) > 0;
      }
      if (!x->skip_encode && *eob)
        vp9_idct32x32_add(dqcoeff, dst, dst_stride, *eob);
      break;
    case TX_16X16:
      if (!x->skip_recode) {
        vpx_subtract_block(16, 16, src_diff, diff_stride, src, src_stride, dst,
                           dst_stride);
        vp9_fht16x16(src_diff, coeff, diff_stride, tx_type);
        vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, p->quant,
                       p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
                       scan_order->scan, scan_order->iscan);
      }
      if (args->enable_coeff_opt && !x->skip_recode) {
        *a = *l = vp9_optimize_b(x, plane, block, tx_size, entropy_ctx) > 0;
      }
      if (!x->skip_encode && *eob)
        vp9_iht16x16_add(tx_type, dqcoeff, dst, dst_stride, *eob);
      break;
    case TX_8X8:
      if (!x->skip_recode) {
        vpx_subtract_block(8, 8, src_diff, diff_stride, src, src_stride, dst,
                           dst_stride);
        vp9_fht8x8(src_diff, coeff, diff_stride, tx_type);
        vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant,
                       p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
                       scan_order->scan, scan_order->iscan);
      }
      if (args->enable_coeff_opt && !x->skip_recode) {
        *a = *l = vp9_optimize_b(x, plane, block, tx_size, entropy_ctx) > 0;
      }
      if (!x->skip_encode && *eob)
        vp9_iht8x8_add(tx_type, dqcoeff, dst, dst_stride, *eob);
      break;
    case TX_4X4:
      if (!x->skip_recode) {
        vpx_subtract_block(4, 4, src_diff, diff_stride, src, src_stride, dst,
                           dst_stride);
        if (tx_type != DCT_DCT)
          vp9_fht4x4(src_diff, coeff, diff_stride, tx_type);
        else
          x->fwd_txm4x4(src_diff, coeff, diff_stride);
        vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,
                       p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
                       scan_order->scan, scan_order->iscan);
      }
      if (args->enable_coeff_opt && !x->skip_recode) {
        *a = *l = vp9_optimize_b(x, plane, block, tx_size, entropy_ctx) > 0;
      }
      if (!x->skip_encode && *eob) {
        if (tx_type == DCT_DCT)
          // this is like vp9_short_idct4x4 but has a special case around
          // eob<=1 which is significant (not just an optimization) for the
          // lossless case.
          x->itxm_add(dqcoeff, dst, dst_stride, *eob);
        else
          vp9_iht4x4_16_add(dqcoeff, dst, dst_stride, tx_type);
      }
      break;
    default: assert(0); break;
  }
  if (*eob) *(args->skip) = 0;
}
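
// Encode every transform block of one plane of an intra-coded partition,
// setting up the per-plane entropy contexts for coefficient optimization
// when both enable_optimize_b and x->optimize request it.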
void vp9_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
                                  int enable_optimize_b) {
  const MACROBLOCKD *const xd = &x->e_mbd;
  struct optimize_ctx ctx;
  struct encode_b_args arg = { x, enable_optimize_b, ctx.ta[plane],
                               ctx.tl[plane], &xd->mi[0]->skip };

  if (enable_optimize_b && x->optimize &&
      (!x->skip_recode || !x->skip_optimize)) {
    const struct macroblockd_plane *const pd = &xd->plane[plane];
    const TX_SIZE tx_size =
        plane ? get_uv_tx_size(xd->mi[0], pd) : xd->mi[0]->tx_size;
    vp9_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
  } else {
    arg.enable_coeff_opt = 0;
  }

  vp9_foreach_transformed_block_in_plane(xd, bsize, plane,
                                         vp9_encode_block_intra, &arg);
}