/* bitstream.c */
  1. /*
  2. * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
  10. #include "vp8/common/header.h"
  11. #include "encodemv.h"
  12. #include "vp8/common/entropymode.h"
  13. #include "vp8/common/findnearmv.h"
  14. #include "mcomp.h"
  15. #include "vp8/common/systemdependent.h"
  16. #include <assert.h>
  17. #include <stdio.h>
  18. #include <limits.h>
  19. #include "vpx/vpx_encoder.h"
  20. #include "vpx_mem/vpx_mem.h"
  21. #include "vpx_ports/system_state.h"
  22. #include "bitstream.h"
  23. #include "defaultcoefcounts.h"
  24. #include "vp8/common/common.h"
/* Baseline probability that the macroblock "skip" flag is false, indexed by
 * quantizer index (0..127). At low Q almost nothing is skipped, so the value
 * stays pinned at 255; it tapers toward 16 as Q rises and skipping becomes
 * more common. */
const int vp8cx_base_skip_false_prob[128] = {
  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 251, 248, 244, 240,
  236, 232, 229, 225, 221, 217, 213, 208, 204, 199, 194, 190, 187, 183, 179,
  175, 172, 168, 164, 160, 157, 153, 149, 145, 142, 138, 134, 130, 127, 124,
  120, 117, 114, 110, 107, 104, 101, 98,  95,  92,  89,  86,  83,  80,  77,
  74,  71,  68,  65,  62,  59,  56,  53,  50,  47,  44,  41,  38,  35,  32,
  30,  28,  26,  24,  22,  20,  18,  16,
};
#if defined(SECTIONBITS_OUTPUT)
/* Per-section bit counters, only present in instrumentation builds. */
unsigned __int64 Sectionbits[500];
#endif
#ifdef MODE_STATS
/* Histogram of SPLITMV partitioning choices (debug statistics only). */
int count_mb_seg[4] = { 0, 0, 0, 0 };
#endif
/* Decide whether transmitting updated probabilities for one mode tree saves
 * bits, and if so write the update.
 *
 * New probabilities (Pnew) are derived from this frame's event counts. The
 * cost of coding the tree with Pnew is compared against the cost with the
 * current probabilities (Pcur), plus an 8-bit-per-probability overhead for
 * signalling the update (the `n << 8` term). A single bit announces whether
 * an update follows; on update, Pcur is overwritten in place (0 is not a
 * legal vp8_prob, so it is clamped to 1).
 */
static void update_mode(vp8_writer *const w, int n, vp8_token tok[/* n */],
                        vp8_tree tree, vp8_prob Pnew[/* n-1 */],
                        vp8_prob Pcur[/* n-1 */],
                        unsigned int bct[/* n-1 */][2],
                        const unsigned int num_events[/* n */]) {
  unsigned int new_b = 0, old_b = 0;
  int i = 0;

  /* Pass the full symbol count n, then decrement: a tree with n leaves has
   * n - 1 internal nodes, which is the loop bound below. */
  vp8_tree_probs_from_distribution(n--, tok, tree, Pnew, bct, num_events, 256,
                                   1);

  /* Total branch cost over all internal nodes, old vs. new probabilities. */
  do {
    new_b += vp8_cost_branch(bct[i], Pnew[i]);
    old_b += vp8_cost_branch(bct[i], Pcur[i]);
  } while (++i < n);

  if (new_b + (n << 8) < old_b) {
    int j = 0;

    vp8_write_bit(w, 1);

    do {
      const vp8_prob p = Pnew[j];

      /* Clamp 0 to 1: vp8_prob 0 is invalid in the bitstream. */
      vp8_write_literal(w, Pcur[j] = p ? p : 1, 8);
    } while (++j < n);
  } else
    vp8_write_bit(w, 0);
}
  65. static void update_mbintra_mode_probs(VP8_COMP *cpi) {
  66. VP8_COMMON *const x = &cpi->common;
  67. vp8_writer *const w = cpi->bc;
  68. {
  69. vp8_prob Pnew[VP8_YMODES - 1];
  70. unsigned int bct[VP8_YMODES - 1][2];
  71. update_mode(w, VP8_YMODES, vp8_ymode_encodings, vp8_ymode_tree, Pnew,
  72. x->fc.ymode_prob, bct, (unsigned int *)cpi->mb.ymode_count);
  73. }
  74. {
  75. vp8_prob Pnew[VP8_UV_MODES - 1];
  76. unsigned int bct[VP8_UV_MODES - 1][2];
  77. update_mode(w, VP8_UV_MODES, vp8_uv_mode_encodings, vp8_uv_mode_tree, Pnew,
  78. x->fc.uv_mode_prob, bct, (unsigned int *)cpi->mb.uv_mode_count);
  79. }
  80. }
  81. static void write_ymode(vp8_writer *bc, int m, const vp8_prob *p) {
  82. vp8_write_token(bc, vp8_ymode_tree, p, vp8_ymode_encodings + m);
  83. }
  84. static void kfwrite_ymode(vp8_writer *bc, int m, const vp8_prob *p) {
  85. vp8_write_token(bc, vp8_kf_ymode_tree, p, vp8_kf_ymode_encodings + m);
  86. }
  87. static void write_uv_mode(vp8_writer *bc, int m, const vp8_prob *p) {
  88. vp8_write_token(bc, vp8_uv_mode_tree, p, vp8_uv_mode_encodings + m);
  89. }
  90. static void write_bmode(vp8_writer *bc, int m, const vp8_prob *p) {
  91. vp8_write_token(bc, vp8_bmode_tree, p, vp8_bmode_encodings + m);
  92. }
  93. static void write_split(vp8_writer *bc, int x) {
  94. vp8_write_token(bc, vp8_mbsplit_tree, vp8_mbsplit_probs,
  95. vp8_mbsplit_encodings + x);
  96. }
/* Append the tokens p[0..xcount) to the boolean coder w.
 *
 * This is a hand-unrolled copy of the generic vp8_encode_bool() path: the
 * arithmetic-coder state (lowvalue, range, count) is cached in locals for
 * speed and written back to w only once at the end. For each token it walks
 * the coefficient tree emitting one bool per tree branch, then (for tokens
 * with a base value) emits the extra magnitude bits and the sign bit.
 */
void vp8_pack_tokens(vp8_writer *w, const TOKENEXTRA *p, int xcount) {
  const TOKENEXTRA *stop = p + xcount;
  unsigned int split;
  int shift;
  /* Cached coder state; flushed back to w before returning. */
  int count = w->count;
  unsigned int range = w->range;
  unsigned int lowvalue = w->lowvalue;

  while (p < stop) {
    const int t = p->Token;
    vp8_token *a = vp8_coef_encodings + t;
    const vp8_extra_bit_struct *b = vp8_extra_bits + t;
    int i = 0;
    const unsigned char *pp = p->context_tree;
    int v = a->value;
    int n = a->Len;

    /* If the EOB node can be skipped, drop the first tree bit and start
     * from the tree position just past it. */
    if (p->skip_eob_node) {
      n--;
      i = 2;
    }

    /* Encode each bit of the token value along the coefficient tree. */
    do {
      const int bb = (v >> --n) & 1;

      /* Split the current range according to the node probability. */
      split = 1 + (((range - 1) * pp[i >> 1]) >> 8);
      i = vp8_coef_tree[i + bb];

      if (bb) {
        lowvalue += split;
        range = range - split;
      } else {
        range = split;
      }

      /* Renormalize: shift range back into [128, 256). */
      shift = vp8_norm[range];
      range <<= shift;
      count += shift;

      if (count >= 0) {
        int offset = shift - count;

        /* Carry propagation: a carry out of the 24-bit window ripples
         * back through any trailing 0xff output bytes.
         * NOTE(review): assumes some earlier byte is not 0xff, else x
         * reaches -1 and buffer[-1] is written — verify stream invariant. */
        if ((lowvalue << (offset - 1)) & 0x80000000) {
          int x = w->pos - 1;

          while (x >= 0 && w->buffer[x] == 0xff) {
            w->buffer[x] = (unsigned char)0;
            x--;
          }

          w->buffer[x] += 1;
        }

        validate_buffer(w->buffer + w->pos, 1, w->buffer_end, w->error);
        /* Emit the top byte of the coder window. */
        w->buffer[w->pos++] = (lowvalue >> (24 - offset));

        lowvalue <<= offset;
        shift = count;
        lowvalue &= 0xffffff;
        count -= 8;
      }

      lowvalue <<= shift;
    } while (n);

    /* Tokens with a base value also carry extra magnitude bits and sign. */
    if (b->base_val) {
      const int e = p->Extra, L = b->Len;

      if (L) {
        const unsigned char *proba = b->prob;
        const int v2 = e >> 1; /* magnitude residual (sign is bit 0) */
        int n2 = L;            /* number of bits in v2, assumed nonzero */
        i = 0;

        /* Same bool-coding loop as above, over the extra-bits tree. */
        do {
          const int bb = (v2 >> --n2) & 1;
          split = 1 + (((range - 1) * proba[i >> 1]) >> 8);
          i = b->tree[i + bb];

          if (bb) {
            lowvalue += split;
            range = range - split;
          } else {
            range = split;
          }

          shift = vp8_norm[range];
          range <<= shift;
          count += shift;

          if (count >= 0) {
            int offset = shift - count;

            /* Carry propagation, as above. */
            if ((lowvalue << (offset - 1)) & 0x80000000) {
              int x = w->pos - 1;

              while (x >= 0 && w->buffer[x] == 0xff) {
                w->buffer[x] = (unsigned char)0;
                x--;
              }

              w->buffer[x] += 1;
            }

            validate_buffer(w->buffer + w->pos, 1, w->buffer_end, w->error);
            w->buffer[w->pos++] = (lowvalue >> (24 - offset));

            lowvalue <<= offset;
            shift = count;
            lowvalue &= 0xffffff;
            count -= 8;
          }

          lowvalue <<= shift;
        } while (n2);
      }

      /* Sign bit, coded with probability 1/2 (simple range halving). */
      {
        split = (range + 1) >> 1;

        if (e & 1) {
          lowvalue += split;
          range = range - split;
        } else {
          range = split;
        }

        range <<= 1;

        if ((lowvalue & 0x80000000)) {
          int x = w->pos - 1;

          while (x >= 0 && w->buffer[x] == 0xff) {
            w->buffer[x] = (unsigned char)0;
            x--;
          }

          w->buffer[x] += 1;
        }

        lowvalue <<= 1;

        if (!++count) {
          count = -8;
          validate_buffer(w->buffer + w->pos, 1, w->buffer_end, w->error);
          w->buffer[w->pos++] = (lowvalue >> 24);
          lowvalue &= 0xffffff;
        }
      }
    }

    ++p;
  }

  /* Write the cached coder state back. */
  w->count = count;
  w->lowvalue = lowvalue;
  w->range = range;
}
  220. static void write_partition_size(unsigned char *cx_data, int size) {
  221. signed char csize;
  222. csize = size & 0xff;
  223. *cx_data = csize;
  224. csize = (size >> 8) & 0xff;
  225. *(cx_data + 1) = csize;
  226. csize = (size >> 16) & 0xff;
  227. *(cx_data + 2) = csize;
  228. }
  229. static void pack_tokens_into_partitions(VP8_COMP *cpi, unsigned char *cx_data,
  230. unsigned char *cx_data_end,
  231. int num_part) {
  232. int i;
  233. unsigned char *ptr = cx_data;
  234. unsigned char *ptr_end = cx_data_end;
  235. vp8_writer *w;
  236. for (i = 0; i < num_part; ++i) {
  237. int mb_row;
  238. w = cpi->bc + i + 1;
  239. vp8_start_encode(w, ptr, ptr_end);
  240. for (mb_row = i; mb_row < cpi->common.mb_rows; mb_row += num_part) {
  241. const TOKENEXTRA *p = cpi->tplist[mb_row].start;
  242. const TOKENEXTRA *stop = cpi->tplist[mb_row].stop;
  243. int tokens = (int)(stop - p);
  244. vp8_pack_tokens(w, p, tokens);
  245. }
  246. vp8_stop_encode(w);
  247. ptr += w->pos;
  248. }
  249. }
  250. #if CONFIG_MULTITHREAD
  251. static void pack_mb_row_tokens(VP8_COMP *cpi, vp8_writer *w) {
  252. int mb_row;
  253. for (mb_row = 0; mb_row < cpi->common.mb_rows; ++mb_row) {
  254. const TOKENEXTRA *p = cpi->tplist[mb_row].start;
  255. const TOKENEXTRA *stop = cpi->tplist[mb_row].stop;
  256. int tokens = (int)(stop - p);
  257. vp8_pack_tokens(w, p, tokens);
  258. }
  259. }
  260. #endif // CONFIG_MULTITHREAD
  261. static void write_mv_ref(vp8_writer *w, MB_PREDICTION_MODE m,
  262. const vp8_prob *p) {
  263. assert(NEARESTMV <= m && m <= SPLITMV);
  264. vp8_write_token(w, vp8_mv_ref_tree, p,
  265. vp8_mv_ref_encoding_array + (m - NEARESTMV));
  266. }
  267. static void write_sub_mv_ref(vp8_writer *w, B_PREDICTION_MODE m,
  268. const vp8_prob *p) {
  269. assert(LEFT4X4 <= m && m <= NEW4X4);
  270. vp8_write_token(w, vp8_sub_mv_ref_tree, p,
  271. vp8_sub_mv_ref_encoding_array + (m - LEFT4X4));
  272. }
  273. static void write_mv(vp8_writer *w, const MV *mv, const int_mv *ref,
  274. const MV_CONTEXT *mvc) {
  275. MV e;
  276. e.row = mv->row - ref->as_mv.row;
  277. e.col = mv->col - ref->as_mv.col;
  278. vp8_encode_motion_vector(w, &e, mvc);
  279. }
  280. static void write_mb_features(vp8_writer *w, const MB_MODE_INFO *mi,
  281. const MACROBLOCKD *x) {
  282. /* Encode the MB segment id. */
  283. if (x->segmentation_enabled && x->update_mb_segmentation_map) {
  284. switch (mi->segment_id) {
  285. case 0:
  286. vp8_write(w, 0, x->mb_segment_tree_probs[0]);
  287. vp8_write(w, 0, x->mb_segment_tree_probs[1]);
  288. break;
  289. case 1:
  290. vp8_write(w, 0, x->mb_segment_tree_probs[0]);
  291. vp8_write(w, 1, x->mb_segment_tree_probs[1]);
  292. break;
  293. case 2:
  294. vp8_write(w, 1, x->mb_segment_tree_probs[0]);
  295. vp8_write(w, 0, x->mb_segment_tree_probs[2]);
  296. break;
  297. case 3:
  298. vp8_write(w, 1, x->mb_segment_tree_probs[0]);
  299. vp8_write(w, 1, x->mb_segment_tree_probs[2]);
  300. break;
  301. /* TRAP.. This should not happen */
  302. default:
  303. vp8_write(w, 0, x->mb_segment_tree_probs[0]);
  304. vp8_write(w, 0, x->mb_segment_tree_probs[1]);
  305. break;
  306. }
  307. }
  308. }
  309. void vp8_convert_rfct_to_prob(VP8_COMP *const cpi) {
  310. const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
  311. const int rf_intra = rfct[INTRA_FRAME];
  312. const int rf_inter =
  313. rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
  314. /* Calculate the probabilities used to code the ref frame based on usage */
  315. if (!(cpi->prob_intra_coded = rf_intra * 255 / (rf_intra + rf_inter))) {
  316. cpi->prob_intra_coded = 1;
  317. }
  318. cpi->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;
  319. if (!cpi->prob_last_coded) cpi->prob_last_coded = 1;
  320. cpi->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
  321. ? (rfct[GOLDEN_FRAME] * 255) /
  322. (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
  323. : 128;
  324. if (!cpi->prob_gf_coded) cpi->prob_gf_coded = 1;
  325. }
/* Write the per-frame mode/probability header and the per-macroblock mode
 * and motion-vector information for an inter frame.
 *
 * Emits, in order: the skip-false probability (if coefficient skipping is
 * on), the three reference-frame probabilities, any intra-mode probability
 * updates, and any MV probability updates; then walks the mode-info grid in
 * raster order coding each macroblock's segment id, skip flag, mode,
 * reference frame, and motion vectors.
 */
static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
  VP8_COMMON *const pc = &cpi->common;
  vp8_writer *const w = cpi->bc;
  const MV_CONTEXT *mvc = pc->fc.mvc;
  MODE_INFO *m = pc->mi;
  const int mis = pc->mode_info_stride;
  int mb_row = -1;
  int prob_skip_false = 0;

  cpi->mb.partition_info = cpi->mb.pi;

  vp8_convert_rfct_to_prob(cpi);

  if (pc->mb_no_coeff_skip) {
    /* Probability that a macroblock is NOT skipped, clamped to [1, 255],
     * estimated from this frame's skip counts. */
    int total_mbs = pc->mb_rows * pc->mb_cols;

    prob_skip_false = (total_mbs - cpi->mb.skip_true_count) * 256 / total_mbs;

    if (prob_skip_false <= 1) prob_skip_false = 1;
    if (prob_skip_false > 255) prob_skip_false = 255;

    cpi->prob_skip_false = prob_skip_false;
    vp8_write_literal(w, prob_skip_false, 8);
  }

  vp8_write_literal(w, cpi->prob_intra_coded, 8);
  vp8_write_literal(w, cpi->prob_last_coded, 8);
  vp8_write_literal(w, cpi->prob_gf_coded, 8);

  update_mbintra_mode_probs(cpi);

  vp8_write_mvprobs(cpi);

  while (++mb_row < pc->mb_rows) {
    int mb_col = -1;

    while (++mb_col < pc->mb_cols) {
      const MB_MODE_INFO *const mi = &m->mbmi;
      const MV_REFERENCE_FRAME rf = mi->ref_frame;
      const MB_PREDICTION_MODE mode = mi->mode;
      MACROBLOCKD *xd = &cpi->mb.e_mbd;

      /* Distance of Mb to the various image edges.
       * These specified to 8th pel as they are always compared to MV
       * values that are in 1/8th pel units
       */
      xd->mb_to_left_edge = -((mb_col * 16) << 3);
      xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
      xd->mb_to_top_edge = -((mb_row * 16) << 3);
      xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;

      if (cpi->mb.e_mbd.update_mb_segmentation_map) {
        write_mb_features(w, mi, &cpi->mb.e_mbd);
      }

      if (pc->mb_no_coeff_skip) {
        vp8_encode_bool(w, m->mbmi.mb_skip_coeff, prob_skip_false);
      }

      if (rf == INTRA_FRAME) {
        /* Intra macroblock: mode plus (for B_PRED) 16 sub-block modes. */
        vp8_write(w, 0, cpi->prob_intra_coded);
        write_ymode(w, mode, pc->fc.ymode_prob);

        if (mode == B_PRED) {
          int j = 0;

          do {
            write_bmode(w, m->bmi[j].as_mode, pc->fc.bmode_prob);
          } while (++j < 16);
        }

        write_uv_mode(w, mi->uv_mode, pc->fc.uv_mode_prob);
      } else { /* inter coded */
        int_mv best_mv;
        vp8_prob mv_ref_p[VP8_MVREFS - 1];

        vp8_write(w, 1, cpi->prob_intra_coded);

        /* Reference frame: last, then golden-vs-altref. */
        if (rf == LAST_FRAME)
          vp8_write(w, 0, cpi->prob_last_coded);
        else {
          vp8_write(w, 1, cpi->prob_last_coded);
          vp8_write(w, (rf == GOLDEN_FRAME) ? 0 : 1, cpi->prob_gf_coded);
        }

        {
          /* Derive the MV reference probabilities from neighbour context. */
          int_mv n1, n2;
          int ct[4];

          vp8_find_near_mvs(xd, m, &n1, &n2, &best_mv, ct, rf,
                            cpi->common.ref_frame_sign_bias);
          vp8_clamp_mv2(&best_mv, xd);

          vp8_mv_ref_probs(mv_ref_p, ct);
        }

        write_mv_ref(w, mode, mv_ref_p);

        switch (mode) /* new, split require MVs */
        {
          case NEWMV: write_mv(w, &mi->mv.as_mv, &best_mv, mvc); break;
          case SPLITMV: {
            int j = 0;

#ifdef MODE_STATS
            ++count_mb_seg[mi->partitioning];
#endif

            write_split(w, mi->partitioning);

            do {
              B_PREDICTION_MODE blockmode;
              int_mv blockmv;
              const int *const L = vp8_mbsplits[mi->partitioning];
              int k = -1; /* first block in subset j */
              int mv_contz;
              int_mv leftmv, abovemv;

              blockmode = cpi->mb.partition_info->bmi[j].mode;
              blockmv = cpi->mb.partition_info->bmi[j].mv;

              /* Locate the first 4x4 block belonging to partition j. */
              while (j != L[++k]) {
                assert(k < 16);
              }

              leftmv.as_int = left_block_mv(m, k);
              abovemv.as_int = above_block_mv(m, k, mis);
              mv_contz = vp8_mv_cont(&leftmv, &abovemv);

              write_sub_mv_ref(w, blockmode, vp8_sub_mv_ref_prob2[mv_contz]);

              if (blockmode == NEW4X4) {
                write_mv(w, &blockmv.as_mv, &best_mv, (const MV_CONTEXT *)mvc);
              }
            } while (++j < cpi->mb.partition_info->count);
            break;
          }
          default: break;
        }
      }

      ++m;
      cpi->mb.partition_info++;
    }

    ++m; /* skip L prediction border */
    cpi->mb.partition_info++;
  }
}
/* Write the per-macroblock mode information for a key frame: optional
 * skip-false probability header, then for each macroblock its segment id,
 * skip flag, luma mode (with 16 context-coded sub-modes for B_PRED) and
 * chroma mode, all against the fixed key-frame probability tables. */
static void write_kfmodes(VP8_COMP *cpi) {
  vp8_writer *const bc = cpi->bc;
  const VP8_COMMON *const c = &cpi->common;
  /* const */
  MODE_INFO *m = c->mi;

  int mb_row = -1;
  int prob_skip_false = 0;

  if (c->mb_no_coeff_skip) {
    /* Probability that a macroblock is NOT skipped, clamped to [1, 255]. */
    int total_mbs = c->mb_rows * c->mb_cols;

    prob_skip_false = (total_mbs - cpi->mb.skip_true_count) * 256 / total_mbs;

    if (prob_skip_false <= 1) prob_skip_false = 1;
    if (prob_skip_false >= 255) prob_skip_false = 255;

    cpi->prob_skip_false = prob_skip_false;
    vp8_write_literal(bc, prob_skip_false, 8);
  }

  while (++mb_row < c->mb_rows) {
    int mb_col = -1;

    while (++mb_col < c->mb_cols) {
      const int ym = m->mbmi.mode;

      if (cpi->mb.e_mbd.update_mb_segmentation_map) {
        write_mb_features(bc, &m->mbmi, &cpi->mb.e_mbd);
      }

      if (c->mb_no_coeff_skip) {
        vp8_encode_bool(bc, m->mbmi.mb_skip_coeff, prob_skip_false);
      }

      kfwrite_ymode(bc, ym, vp8_kf_ymode_prob);

      if (ym == B_PRED) {
        const int mis = c->mode_info_stride;
        int i = 0;

        /* Each 4x4 sub-mode is coded with a probability table selected by
         * the above and left neighbouring sub-modes. */
        do {
          const B_PREDICTION_MODE A = above_block_mode(m, i, mis);
          const B_PREDICTION_MODE L = left_block_mode(m, i);
          const int bm = m->bmi[i].as_mode;

          write_bmode(bc, bm, vp8_kf_bmode_prob[A][L]);
        } while (++i < 16);
      }

      write_uv_mode(bc, (m++)->mbmi.uv_mode, vp8_kf_uv_mode_prob);
    }

    m++; /* skip L prediction border */
  }
}
  481. #if 0
  482. /* This function is used for debugging probability trees. */
  483. static void print_prob_tree(vp8_prob
  484. coef_probs[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES])
  485. {
  486. /* print coef probability tree */
  487. int i,j,k,l;
  488. FILE* f = fopen("enc_tree_probs.txt", "a");
  489. fprintf(f, "{\n");
  490. for (i = 0; i < BLOCK_TYPES; ++i)
  491. {
  492. fprintf(f, " {\n");
  493. for (j = 0; j < COEF_BANDS; ++j)
  494. {
  495. fprintf(f, " {\n");
  496. for (k = 0; k < PREV_COEF_CONTEXTS; ++k)
  497. {
  498. fprintf(f, " {");
  499. for (l = 0; l < ENTROPY_NODES; ++l)
  500. {
  501. fprintf(f, "%3u, ",
  502. (unsigned int)(coef_probs [i][j][k][l]));
  503. }
  504. fprintf(f, " }\n");
  505. }
  506. fprintf(f, " }\n");
  507. }
  508. fprintf(f, " }\n");
  509. }
  510. fprintf(f, "}\n");
  511. fclose(f);
  512. }
  513. #endif
  514. static void sum_probs_over_prev_coef_context(
  515. const unsigned int probs[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS],
  516. unsigned int *out) {
  517. int i, j;
  518. for (i = 0; i < MAX_ENTROPY_TOKENS; ++i) {
  519. for (j = 0; j < PREV_COEF_CONTEXTS; ++j) {
  520. const unsigned int tmp = out[i];
  521. out[i] += probs[j][i];
  522. /* check for wrap */
  523. if (out[i] < tmp) out[i] = UINT_MAX;
  524. }
  525. }
  526. }
  527. static int prob_update_savings(const unsigned int *ct, const vp8_prob oldp,
  528. const vp8_prob newp, const vp8_prob upd) {
  529. const int old_b = vp8_cost_branch(ct, oldp);
  530. const int new_b = vp8_cost_branch(ct, newp);
  531. const int update_b = 8 + ((vp8_cost_one(upd) - vp8_cost_zero(upd)) >> 8);
  532. return old_b - new_b - update_b;
  533. }
/* Estimate the bits saved by coefficient-probability updates under the
 * error-resilient constraint that the probabilities must be identical
 * across all previous-coefficient contexts.
 *
 * For each (block type, band), token counts are summed over the contexts,
 * a single shared probability set is derived, and the per-token savings
 * are accumulated over the contexts. On key frames all differing
 * probabilities must be updated regardless of savings, so their (possibly
 * negative) savings are always counted. Returns total savings in bits.
 */
static int independent_coef_context_savings(VP8_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  int savings = 0;
  int i = 0;

  do {
    int j = 0;

    do {
      int k = 0;
      unsigned int prev_coef_count_sum[MAX_ENTROPY_TOKENS] = { 0 };
      int prev_coef_savings[MAX_ENTROPY_TOKENS] = { 0 };
      const unsigned int(*probs)[MAX_ENTROPY_TOKENS];

      /* Calculate new probabilities given the constraint that
       * they must be equal over the prev coef contexts
       */

      probs = (const unsigned int(*)[MAX_ENTROPY_TOKENS])x->coef_counts[i][j];

      /* Reset to default probabilities at key frames */
      if (cpi->common.frame_type == KEY_FRAME) {
        probs = default_coef_counts[i][j];
      }

      sum_probs_over_prev_coef_context(probs, prev_coef_count_sum);

      do {
        /* at every context */

        /* calc probs and branch cts for this frame only */
        int t = 0; /* token/prob index */

        /* Note: the same summed counts are used for every context k, so
         * frame_coef_probs ends up identical across contexts. */
        vp8_tree_probs_from_distribution(
            MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
            cpi->frame_coef_probs[i][j][k], cpi->frame_branch_ct[i][j][k],
            prev_coef_count_sum, 256, 1);

        do {
          const unsigned int *ct = cpi->frame_branch_ct[i][j][k][t];
          const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t];
          const vp8_prob oldp = cpi->common.fc.coef_probs[i][j][k][t];
          const vp8_prob upd = vp8_coef_update_probs[i][j][k][t];
          const int s = prob_update_savings(ct, oldp, newp, upd);

          /* On key frames, unchanged probabilities cost nothing and save
           * nothing, so they are excluded from the tally. */
          if (cpi->common.frame_type != KEY_FRAME ||
              (cpi->common.frame_type == KEY_FRAME && newp != oldp)) {
            prev_coef_savings[t] += s;
          }
        } while (++t < ENTROPY_NODES);
      } while (++k < PREV_COEF_CONTEXTS);
      k = 0;
      do {
        /* We only update probabilities if we can save bits, except
         * for key frames where we have to update all probabilities
         * to get the equal probabilities across the prev coef
         * contexts.
         */
        if (prev_coef_savings[k] > 0 || cpi->common.frame_type == KEY_FRAME) {
          savings += prev_coef_savings[k];
        }
      } while (++k < ENTROPY_NODES);
    } while (++j < COEF_BANDS);
  } while (++i < BLOCK_TYPES);
  return savings;
}
  589. static int default_coef_context_savings(VP8_COMP *cpi) {
  590. MACROBLOCK *const x = &cpi->mb;
  591. int savings = 0;
  592. int i = 0;
  593. do {
  594. int j = 0;
  595. do {
  596. int k = 0;
  597. do {
  598. /* at every context */
  599. /* calc probs and branch cts for this frame only */
  600. int t = 0; /* token/prob index */
  601. vp8_tree_probs_from_distribution(
  602. MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
  603. cpi->frame_coef_probs[i][j][k], cpi->frame_branch_ct[i][j][k],
  604. x->coef_counts[i][j][k], 256, 1);
  605. do {
  606. const unsigned int *ct = cpi->frame_branch_ct[i][j][k][t];
  607. const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t];
  608. const vp8_prob oldp = cpi->common.fc.coef_probs[i][j][k][t];
  609. const vp8_prob upd = vp8_coef_update_probs[i][j][k][t];
  610. const int s = prob_update_savings(ct, oldp, newp, upd);
  611. if (s > 0) {
  612. savings += s;
  613. }
  614. } while (++t < ENTROPY_NODES);
  615. } while (++k < PREV_COEF_CONTEXTS);
  616. } while (++j < COEF_BANDS);
  617. } while (++i < BLOCK_TYPES);
  618. return savings;
  619. }
  620. void vp8_calc_ref_frame_costs(int *ref_frame_cost, int prob_intra,
  621. int prob_last, int prob_garf) {
  622. assert(prob_intra >= 0);
  623. assert(prob_intra <= 255);
  624. assert(prob_last >= 0);
  625. assert(prob_last <= 255);
  626. assert(prob_garf >= 0);
  627. assert(prob_garf <= 255);
  628. ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(prob_intra);
  629. ref_frame_cost[LAST_FRAME] =
  630. vp8_cost_one(prob_intra) + vp8_cost_zero(prob_last);
  631. ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(prob_intra) +
  632. vp8_cost_one(prob_last) +
  633. vp8_cost_zero(prob_garf);
  634. ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(prob_intra) +
  635. vp8_cost_one(prob_last) +
  636. vp8_cost_one(prob_garf);
  637. }
/* Estimate the total bits that probability updates would save this frame:
 * the reference-frame probabilities (inter frames only) plus the
 * coefficient probabilities, using the error-resilient variant when
 * partition-independence is required. Returns estimated savings in bits. */
int vp8_estimate_entropy_savings(VP8_COMP *cpi) {
  int savings = 0;

  const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
  const int rf_intra = rfct[INTRA_FRAME];
  const int rf_inter =
      rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
  int new_intra, new_last, new_garf, oldtotal, newtotal;
  int ref_frame_cost[MAX_REF_FRAMES];

  vpx_clear_system_state();

  if (cpi->common.frame_type != KEY_FRAME) {
    /* Candidate probabilities from this frame's usage counts (same
     * derivation as vp8_convert_rfct_to_prob, clamped away from 0). */
    if (!(new_intra = rf_intra * 255 / (rf_intra + rf_inter))) new_intra = 1;

    new_last = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;

    new_garf = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
                   ? (rfct[GOLDEN_FRAME] * 255) /
                         (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
                   : 128;

    vp8_calc_ref_frame_costs(ref_frame_cost, new_intra, new_last, new_garf);

    newtotal = rfct[INTRA_FRAME] * ref_frame_cost[INTRA_FRAME] +
               rfct[LAST_FRAME] * ref_frame_cost[LAST_FRAME] +
               rfct[GOLDEN_FRAME] * ref_frame_cost[GOLDEN_FRAME] +
               rfct[ALTREF_FRAME] * ref_frame_cost[ALTREF_FRAME];

    /* old costs */
    vp8_calc_ref_frame_costs(ref_frame_cost, cpi->prob_intra_coded,
                             cpi->prob_last_coded, cpi->prob_gf_coded);

    oldtotal = rfct[INTRA_FRAME] * ref_frame_cost[INTRA_FRAME] +
               rfct[LAST_FRAME] * ref_frame_cost[LAST_FRAME] +
               rfct[GOLDEN_FRAME] * ref_frame_cost[GOLDEN_FRAME] +
               rfct[ALTREF_FRAME] * ref_frame_cost[ALTREF_FRAME];

    /* Costs are in 1/256-bit units; convert to whole bits. */
    savings += (oldtotal - newtotal) / 256;
  }

  if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
    savings += independent_coef_context_savings(cpi);
  } else {
    savings += default_coef_context_savings(cpi);
  }

  return savings;
}
  675. #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  676. int vp8_update_coef_context(VP8_COMP *cpi) {
  677. int savings = 0;
  678. if (cpi->common.frame_type == KEY_FRAME) {
  679. /* Reset to default counts/probabilities at key frames */
  680. vp8_copy(cpi->mb.coef_counts, default_coef_counts);
  681. }
  682. if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS)
  683. savings += independent_coef_context_savings(cpi);
  684. else
  685. savings += default_coef_context_savings(cpi);
  686. return savings;
  687. }
  688. #endif
/* Decide which coefficient probabilities to update and either write the
 * updates to the header writer, or (in on-the-fly bitpacking builds) just
 * record the update flags for pack_coef_probs() to emit later.
 *
 * Relies on frame_coef_probs/frame_branch_ct having been filled by
 * vp8_estimate_entropy_savings(). Updates are taken when they save bits;
 * on error-resilient key frames every changed probability is forced so the
 * contexts stay equal.
 */
void vp8_update_coef_probs(VP8_COMP *cpi) {
  int i = 0;
#if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
  vp8_writer *const w = cpi->bc;
#endif
  int savings = 0;

  vpx_clear_system_state();

  do {
    int j = 0;

    do {
      int k = 0;
      int prev_coef_savings[ENTROPY_NODES] = { 0 };

      /* Error-resilient mode: pre-compute per-token savings summed over
       * all contexts, since the update decision must be shared. */
      if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
        for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
          int t; /* token/prob index */
          for (t = 0; t < ENTROPY_NODES; ++t) {
            const unsigned int *ct = cpi->frame_branch_ct[i][j][k][t];
            const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t];
            const vp8_prob oldp = cpi->common.fc.coef_probs[i][j][k][t];
            const vp8_prob upd = vp8_coef_update_probs[i][j][k][t];

            prev_coef_savings[t] += prob_update_savings(ct, oldp, newp, upd);
          }
        }
        k = 0;
      }

      do {
        /* note: use result from vp8_estimate_entropy_savings, so no
         * need to call vp8_tree_probs_from_distribution here.
         */

        /* at every context */

        /* calc probs and branch cts for this frame only */
        int t = 0; /* token/prob index */

        do {
          const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t];
          vp8_prob *Pold = cpi->common.fc.coef_probs[i][j][k] + t;
          const vp8_prob upd = vp8_coef_update_probs[i][j][k][t];
          int s = prev_coef_savings[t];
          int u = 0;

          /* Normal mode: per-context savings decide each update. */
          if (!(cpi->oxcf.error_resilient_mode &
                VPX_ERROR_RESILIENT_PARTITIONS)) {
            s = prob_update_savings(cpi->frame_branch_ct[i][j][k][t], *Pold,
                                    newp, upd);
          }

          if (s > 0) u = 1;

          /* Force updates on key frames if the new is different,
           * so that we can be sure we end up with equal probabilities
           * over the prev coef contexts.
           */
          if ((cpi->oxcf.error_resilient_mode &
               VPX_ERROR_RESILIENT_PARTITIONS) &&
              cpi->common.frame_type == KEY_FRAME && newp != *Pold) {
            u = 1;
          }

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
          cpi->update_probs[i][j][k][t] = u;
#else
          vp8_write(w, u, upd);
#endif

          if (u) {
            /* send/use new probability */
            *Pold = newp;
#if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
            vp8_write_literal(w, newp, 8);
#endif
            savings += s;
          }
        } while (++t < ENTROPY_NODES);
      } while (++k < PREV_COEF_CONTEXTS);
    } while (++j < COEF_BANDS);
  } while (++i < BLOCK_TYPES);
}
  760. #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  761. static void pack_coef_probs(VP8_COMP *cpi) {
  762. int i = 0;
  763. vp8_writer *const w = cpi->bc;
  764. do {
  765. int j = 0;
  766. do {
  767. int k = 0;
  768. do {
  769. int t = 0; /* token/prob index */
  770. do {
  771. const vp8_prob newp = cpi->common.fc.coef_probs[i][j][k][t];
  772. const vp8_prob upd = vp8_coef_update_probs[i][j][k][t];
  773. const char u = cpi->update_probs[i][j][k][t];
  774. vp8_write(w, u, upd);
  775. if (u) {
  776. /* send/use new probability */
  777. vp8_write_literal(w, newp, 8);
  778. }
  779. } while (++t < ENTROPY_NODES);
  780. } while (++k < PREV_COEF_CONTEXTS);
  781. } while (++j < COEF_BANDS);
  782. } while (++i < BLOCK_TYPES);
  783. }
  784. #endif
#ifdef PACKET_TESTING
/* Log stream handle used only by packet-testing instrumentation builds. */
FILE *vpxlogc = 0;
#endif
  788. static void put_delta_q(vp8_writer *bc, int delta_q) {
  789. if (delta_q != 0) {
  790. vp8_write_bit(bc, 1);
  791. vp8_write_literal(bc, abs(delta_q), 4);
  792. if (delta_q < 0)
  793. vp8_write_bit(bc, 1);
  794. else
  795. vp8_write_bit(bc, 0);
  796. } else
  797. vp8_write_bit(bc, 0);
  798. }
/* Pack the complete compressed frame into dest.
 *
 * Layout produced (per RFC 6386, section 9):
 *   - 3-byte uncompressed frame tag (filled in last, once the first
 *     partition size is known),
 *   - on key frames, 7 extra uncompressed bytes: start code 9D 01 2A plus
 *     scale/width and scale/height packed 16 bits each,
 *   - the boolean-coded first partition (frame header + per-MB modes/MVs),
 *   - one or more token partitions.
 *
 * Parameters:
 *   cpi      - encoder instance; supplies frame state and the writers.
 *   dest     - output buffer start; the frame tag lands at dest[0..2].
 *   dest_end - one past the end of the output buffer (overflow checks).
 *   size     - out: total number of bytes written for the frame.
 */
void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
                        unsigned char *dest_end, size_t *size) {
  int i, j;
  VP8_HEADER oh;
  VP8_COMMON *const pc = &cpi->common;
  vp8_writer *const bc = cpi->bc;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  int extra_bytes_packed = 0;  /* 7 on key frames (sync code + dimensions) */
  unsigned char *cx_data = dest;
  unsigned char *cx_data_end = dest_end;
  const int *mb_feature_data_bits;
  oh.show_frame = (int)pc->show_frame;
  oh.type = (int)pc->frame_type;
  oh.version = pc->version;
  oh.first_partition_length_in_bytes = 0;
  mb_feature_data_bits = vp8_mb_feature_data_bits;
  bc[0].error = &pc->error;
  /* Reserve room for the 3-byte frame tag; it is written at the end. */
  validate_buffer(cx_data, 3, cx_data_end, &cpi->common.error);
  cx_data += 3;
#if defined(SECTIONBITS_OUTPUT)
  Sectionbits[active_section = 1] += sizeof(VP8_HEADER) * 8 * 256;
#endif
  /* every keyframe send startcode, width, height, scale factor, clamp
   * and color type
   */
  if (oh.type == KEY_FRAME) {
    int v;
    validate_buffer(cx_data, 7, cx_data_end, &cpi->common.error);
    /* Start / synch code */
    cx_data[0] = 0x9D;
    cx_data[1] = 0x01;
    cx_data[2] = 0x2a;
    /* Pack scale and frame size into 16 bits. Store it 8 bits at a time.
     * https://tools.ietf.org/html/rfc6386
     * 9.1. Uncompressed Data Chunk
     * 16 bits : (2 bits Horizontal Scale << 14) | Width (14 bits)
     * 16 bits : (2 bits Vertical Scale << 14) | Height (14 bits)
     */
    v = (pc->horiz_scale << 14) | pc->Width;
    cx_data[3] = v & 0xff;
    cx_data[4] = v >> 8;
    v = (pc->vert_scale << 14) | pc->Height;
    cx_data[5] = v & 0xff;
    cx_data[6] = v >> 8;
    extra_bytes_packed = 7;
    cx_data += extra_bytes_packed;
    vp8_start_encode(bc, cx_data, cx_data_end);
    /* signal clr type */
    vp8_write_bit(bc, 0);
    vp8_write_bit(bc, pc->clamp_type);
  } else {
    vp8_start_encode(bc, cx_data, cx_data_end);
  }
  /* Signal whether or not Segmentation is enabled */
  vp8_write_bit(bc, xd->segmentation_enabled);
  /* Indicate which features are enabled */
  if (xd->segmentation_enabled) {
    /* Signal whether or not the segmentation map is being updated. */
    vp8_write_bit(bc, xd->update_mb_segmentation_map);
    vp8_write_bit(bc, xd->update_mb_segmentation_data);
    if (xd->update_mb_segmentation_data) {
      signed char Data;
      vp8_write_bit(bc, xd->mb_segement_abs_delta);
      /* For each segmentation feature (Quant and loop filter level) */
      for (i = 0; i < MB_LVL_MAX; ++i) {
        /* For each of the segments */
        for (j = 0; j < MAX_MB_SEGMENTS; ++j) {
          Data = xd->segment_feature_data[i][j];
          /* Frame level data */
          if (Data) {
            /* Magnitude first, then sign bit (1 = negative). */
            vp8_write_bit(bc, 1);
            if (Data < 0) {
              Data = -Data;
              vp8_write_literal(bc, Data, mb_feature_data_bits[i]);
              vp8_write_bit(bc, 1);
            } else {
              vp8_write_literal(bc, Data, mb_feature_data_bits[i]);
              vp8_write_bit(bc, 0);
            }
          } else
            vp8_write_bit(bc, 0);
        }
      }
    }
    if (xd->update_mb_segmentation_map) {
      /* Write the probs used to decode the segment id for each mb */
      for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) {
        int Data = xd->mb_segment_tree_probs[i];
        /* 255 is the "no update" sentinel for a tree probability. */
        if (Data != 255) {
          vp8_write_bit(bc, 1);
          vp8_write_literal(bc, Data, 8);
        } else
          vp8_write_bit(bc, 0);
      }
    }
  }
  /* Loop filter configuration: type, level and sharpness. */
  vp8_write_bit(bc, pc->filter_type);
  vp8_write_literal(bc, pc->filter_level, 6);
  vp8_write_literal(bc, pc->sharpness_level, 3);
  /* Write out loop filter deltas applied at the MB level based on mode
   * or ref frame (if they are enabled).
   */
  vp8_write_bit(bc, xd->mode_ref_lf_delta_enabled);
  if (xd->mode_ref_lf_delta_enabled) {
    /* Do the deltas need to be updated */
    int send_update =
        xd->mode_ref_lf_delta_update || cpi->oxcf.error_resilient_mode;
    vp8_write_bit(bc, send_update);
    if (send_update) {
      int Data;
      /* Send update */
      for (i = 0; i < MAX_REF_LF_DELTAS; ++i) {
        Data = xd->ref_lf_deltas[i];
        /* Frame level data */
        /* In error-resilient mode the deltas are resent every frame. */
        if (xd->ref_lf_deltas[i] != xd->last_ref_lf_deltas[i] ||
            cpi->oxcf.error_resilient_mode) {
          xd->last_ref_lf_deltas[i] = xd->ref_lf_deltas[i];
          vp8_write_bit(bc, 1);
          if (Data > 0) {
            vp8_write_literal(bc, (Data & 0x3F), 6);
            vp8_write_bit(bc, 0); /* sign */
          } else {
            Data = -Data;
            vp8_write_literal(bc, (Data & 0x3F), 6);
            vp8_write_bit(bc, 1); /* sign */
          }
        } else
          vp8_write_bit(bc, 0);
      }
      /* Send update */
      for (i = 0; i < MAX_MODE_LF_DELTAS; ++i) {
        Data = xd->mode_lf_deltas[i];
        if (xd->mode_lf_deltas[i] != xd->last_mode_lf_deltas[i] ||
            cpi->oxcf.error_resilient_mode) {
          xd->last_mode_lf_deltas[i] = xd->mode_lf_deltas[i];
          vp8_write_bit(bc, 1);
          if (Data > 0) {
            vp8_write_literal(bc, (Data & 0x3F), 6);
            vp8_write_bit(bc, 0); /* sign */
          } else {
            Data = -Data;
            vp8_write_literal(bc, (Data & 0x3F), 6);
            vp8_write_bit(bc, 1); /* sign */
          }
        } else
          vp8_write_bit(bc, 0);
      }
    }
  }
  /* signal here is multi token partition is enabled */
  vp8_write_literal(bc, pc->multi_token_partition, 2);
  /* Frame Qbaseline quantizer index */
  vp8_write_literal(bc, pc->base_qindex, 7);
  /* Transmit Dc, Second order and Uv quantizer delta information */
  put_delta_q(bc, pc->y1dc_delta_q);
  put_delta_q(bc, pc->y2dc_delta_q);
  put_delta_q(bc, pc->y2ac_delta_q);
  put_delta_q(bc, pc->uvdc_delta_q);
  put_delta_q(bc, pc->uvac_delta_q);
  /* When there is a key frame all reference buffers are updated using
   * the new key frame
   */
  if (pc->frame_type != KEY_FRAME) {
    /* Should the GF or ARF be updated using the transmitted frame
     * or buffer
     */
    vp8_write_bit(bc, pc->refresh_golden_frame);
    vp8_write_bit(bc, pc->refresh_alt_ref_frame);
    /* If not being updated from current frame should either GF or ARF
     * be updated from another buffer
     */
    if (!pc->refresh_golden_frame)
      vp8_write_literal(bc, pc->copy_buffer_to_gf, 2);
    if (!pc->refresh_alt_ref_frame)
      vp8_write_literal(bc, pc->copy_buffer_to_arf, 2);
    /* Indicate reference frame sign bias for Golden and ARF frames
     * (always 0 for last frame buffer)
     */
    vp8_write_bit(bc, pc->ref_frame_sign_bias[GOLDEN_FRAME]);
    vp8_write_bit(bc, pc->ref_frame_sign_bias[ALTREF_FRAME]);
  }
#if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
  if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
    /* Error-resilient partitions: carry entropy probs over only from
     * key frames so inter frames never depend on lost predecessors.
     */
    if (pc->frame_type == KEY_FRAME) {
      pc->refresh_entropy_probs = 1;
    } else {
      pc->refresh_entropy_probs = 0;
    }
  }
#endif
  vp8_write_bit(bc, pc->refresh_entropy_probs);
  if (pc->frame_type != KEY_FRAME) vp8_write_bit(bc, pc->refresh_last_frame);
  vpx_clear_system_state();
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  pack_coef_probs(cpi);
#else
  if (pc->refresh_entropy_probs == 0) {
    /* save a copy for later refresh */
    memcpy(&cpi->common.lfc, &cpi->common.fc, sizeof(cpi->common.fc));
  }
  vp8_update_coef_probs(cpi);
#endif
  /* Write out the mb_no_coeff_skip flag */
  vp8_write_bit(bc, pc->mb_no_coeff_skip);
  /* Per-macroblock modes and motion vectors close out partition 0. */
  if (pc->frame_type == KEY_FRAME) {
    write_kfmodes(cpi);
  } else {
    pack_inter_mode_mvs(cpi);
  }
  vp8_stop_encode(bc);
  cx_data += bc->pos;
  oh.first_partition_length_in_bytes = cpi->bc->pos;
  /* update frame tag */
  {
    /* Pack partition size, show frame, version and frame type into to 24 bits.
     * Store it 8 bits at a time.
     * https://tools.ietf.org/html/rfc6386
     * 9.1. Uncompressed Data Chunk
     * The uncompressed data chunk comprises a common (for key frames and
     * interframes) 3-byte frame tag that contains four fields, as follows:
     *
     * 1. A 1-bit frame type (0 for key frames, 1 for interframes).
     *
     * 2. A 3-bit version number (0 - 3 are defined as four different
     * profiles with different decoding complexity; other values may be
     * defined for future variants of the VP8 data format).
     *
     * 3. A 1-bit show_frame flag (0 when current frame is not for display,
     * 1 when current frame is for display).
     *
     * 4. A 19-bit field containing the size of the first data partition in
     * bytes
     */
    int v = (oh.first_partition_length_in_bytes << 5) | (oh.show_frame << 4) |
            (oh.version << 1) | oh.type;
    dest[0] = v & 0xff;
    dest[1] = (v >> 8) & 0xff;
    dest[2] = v >> 16;
  }
  *size = VP8_HEADER_SIZE + extra_bytes_packed + cpi->bc->pos;
  cpi->partition_sz[0] = (unsigned int)*size;
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  {
    /* On-the-fly packing: token partitions were already written into
     * their own buffers; emit the size table and optionally concatenate.
     */
    const int num_part = (1 << pc->multi_token_partition);
    unsigned char *dp = cpi->partition_d[0] + cpi->partition_sz[0];
    if (num_part > 1) {
      /* write token part sizes (all but last) if more than 1 */
      validate_buffer(dp, 3 * (num_part - 1), cpi->partition_d_end[0],
                      &pc->error);
      cpi->partition_sz[0] += 3 * (num_part - 1);
      for (i = 1; i < num_part; ++i) {
        write_partition_size(dp, cpi->partition_sz[i]);
        dp += 3;
      }
    }
    if (!cpi->output_partition) {
      /* concatenate partition buffers */
      for (i = 0; i < num_part; ++i) {
        memmove(dp, cpi->partition_d[i + 1], cpi->partition_sz[i + 1]);
        cpi->partition_d[i + 1] = dp;
        dp += cpi->partition_sz[i + 1];
      }
    }
    /* update total size */
    *size = 0;
    for (i = 0; i < num_part + 1; ++i) {
      *size += cpi->partition_sz[i];
    }
  }
#else
  if (pc->multi_token_partition != ONE_PARTITION) {
    int num_part = 1 << pc->multi_token_partition;
    /* partition size table at the end of first partition */
    cpi->partition_sz[0] += 3 * (num_part - 1);
    *size += 3 * (num_part - 1);
    validate_buffer(cx_data, 3 * (num_part - 1), cx_data_end, &pc->error);
    for (i = 1; i < num_part + 1; ++i) {
      cpi->bc[i].error = &pc->error;
    }
    /* Token data starts after the (num_part - 1) 3-byte size entries. */
    pack_tokens_into_partitions(cpi, cx_data + 3 * (num_part - 1), cx_data_end,
                                num_part);
    for (i = 1; i < num_part; ++i) {
      cpi->partition_sz[i] = cpi->bc[i].pos;
      write_partition_size(cx_data, cpi->partition_sz[i]);
      cx_data += 3;
      *size += cpi->partition_sz[i]; /* add to total */
    }
    /* add last partition to total size */
    cpi->partition_sz[i] = cpi->bc[i].pos;
    *size += cpi->partition_sz[i];
  } else {
    /* Single token partition, written immediately after partition 0. */
    bc[1].error = &pc->error;
    vp8_start_encode(&cpi->bc[1], cx_data, cx_data_end);
#if CONFIG_MULTITHREAD
    if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
      pack_mb_row_tokens(cpi, &cpi->bc[1]);
    } else {
      vp8_pack_tokens(&cpi->bc[1], cpi->tok, cpi->tok_count);
    }
#else
    vp8_pack_tokens(&cpi->bc[1], cpi->tok, cpi->tok_count);
#endif  // CONFIG_MULTITHREAD
    vp8_stop_encode(&cpi->bc[1]);
    *size += cpi->bc[1].pos;
    cpi->partition_sz[1] = cpi->bc[1].pos;
  }
#endif
}