/* vp9_ethread.c */
/*
 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
  10. #include "vp9/encoder/vp9_encodeframe.h"
  11. #include "vp9/encoder/vp9_encoder.h"
  12. #include "vp9/encoder/vp9_ethread.h"
  13. #include "vp9/encoder/vp9_firstpass.h"
  14. #include "vp9/encoder/vp9_multi_thread.h"
  15. #include "vp9/encoder/vp9_temporal_filter.h"
  16. #include "vpx_dsp/vpx_dsp_common.h"
  17. static void accumulate_rd_opt(ThreadData *td, ThreadData *td_t) {
  18. int i, j, k, l, m, n;
  19. for (i = 0; i < REFERENCE_MODES; i++)
  20. td->rd_counts.comp_pred_diff[i] += td_t->rd_counts.comp_pred_diff[i];
  21. for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
  22. td->rd_counts.filter_diff[i] += td_t->rd_counts.filter_diff[i];
  23. for (i = 0; i < TX_SIZES; i++)
  24. for (j = 0; j < PLANE_TYPES; j++)
  25. for (k = 0; k < REF_TYPES; k++)
  26. for (l = 0; l < COEF_BANDS; l++)
  27. for (m = 0; m < COEFF_CONTEXTS; m++)
  28. for (n = 0; n < ENTROPY_TOKENS; n++)
  29. td->rd_counts.coef_counts[i][j][k][l][m][n] +=
  30. td_t->rd_counts.coef_counts[i][j][k][l][m][n];
  31. }
  32. static int enc_worker_hook(void *arg1, void *unused) {
  33. EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  34. VP9_COMP *const cpi = thread_data->cpi;
  35. const VP9_COMMON *const cm = &cpi->common;
  36. const int tile_cols = 1 << cm->log2_tile_cols;
  37. const int tile_rows = 1 << cm->log2_tile_rows;
  38. int t;
  39. (void)unused;
  40. for (t = thread_data->start; t < tile_rows * tile_cols;
  41. t += cpi->num_workers) {
  42. int tile_row = t / tile_cols;
  43. int tile_col = t % tile_cols;
  44. vp9_encode_tile(cpi, thread_data->td, tile_row, tile_col);
  45. }
  46. return 0;
  47. }
  48. static int get_max_tile_cols(VP9_COMP *cpi) {
  49. const int aligned_width = ALIGN_POWER_OF_TWO(cpi->oxcf.width, MI_SIZE_LOG2);
  50. int mi_cols = aligned_width >> MI_SIZE_LOG2;
  51. int min_log2_tile_cols, max_log2_tile_cols;
  52. int log2_tile_cols;
  53. vp9_get_tile_n_bits(mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
  54. log2_tile_cols =
  55. clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols);
  56. if (cpi->oxcf.target_level == LEVEL_AUTO) {
  57. const int level_tile_cols =
  58. log_tile_cols_from_picsize_level(cpi->common.width, cpi->common.height);
  59. if (log2_tile_cols > level_tile_cols) {
  60. log2_tile_cols = VPXMAX(level_tile_cols, min_log2_tile_cols);
  61. }
  62. }
  63. return (1 << log2_tile_cols);
  64. }
// Creates the encoder worker threads and their per-thread data. Runs only
// on the first call (cpi->num_workers == 0); later calls are no-ops, so
// the worker pool survives across frames.
static void create_enc_workers(VP9_COMP *cpi, int num_workers) {
  VP9_COMMON *const cm = &cpi->common;
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  int i;

  // Only run once to create threads and allocate thread data.
  if (cpi->num_workers == 0) {
    int allocated_workers = num_workers;

    // While using SVC, we need to allocate threads according to the highest
    // resolution. When row based multithreading is enabled, it is OK to
    // allocate more threads than the number of max tile columns.
    if (cpi->use_svc && !cpi->row_mt) {
      int max_tile_cols = get_max_tile_cols(cpi);
      allocated_workers = VPXMIN(cpi->oxcf.max_threads, max_tile_cols);
    }

    // CHECK_MEM_ERROR long-jumps out via cm->error on allocation failure.
    CHECK_MEM_ERROR(cm, cpi->workers,
                    vpx_malloc(allocated_workers * sizeof(*cpi->workers)));

    CHECK_MEM_ERROR(cm, cpi->tile_thr_data,
                    vpx_calloc(allocated_workers, sizeof(*cpi->tile_thr_data)));

    for (i = 0; i < allocated_workers; i++) {
      VPxWorker *const worker = &cpi->workers[i];
      EncWorkerData *thread_data = &cpi->tile_thr_data[i];

      // Count the worker before setup so partially-created pools are
      // still tracked for teardown.
      ++cpi->num_workers;
      winterface->init(worker);

      // The last slot is reserved for the main thread, which reuses
      // cpi->td instead of owning a separate ThreadData (see else branch).
      if (i < allocated_workers - 1) {
        thread_data->cpi = cpi;

        // Allocate thread data.
        CHECK_MEM_ERROR(cm, thread_data->td,
                        vpx_memalign(32, sizeof(*thread_data->td)));
        vp9_zero(*thread_data->td);

        // Set up pc_tree.
        thread_data->td->leaf_tree = NULL;
        thread_data->td->pc_tree = NULL;
        vp9_setup_pc_tree(cm, thread_data->td);

        // Allocate frame counters in thread data.
        CHECK_MEM_ERROR(cm, thread_data->td->counts,
                        vpx_calloc(1, sizeof(*thread_data->td->counts)));

        // Create threads
        if (!winterface->reset(worker))
          vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                             "Tile encoder thread creation failed");
      } else {
        // Main thread acts as a worker and uses the thread data in cpi.
        thread_data->cpi = cpi;
        thread_data->td = &cpi->td;
      }

      winterface->sync(worker);
    }
  }
}
  114. static void launch_enc_workers(VP9_COMP *cpi, VPxWorkerHook hook, void *data2,
  115. int num_workers) {
  116. const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  117. int i;
  118. for (i = 0; i < num_workers; i++) {
  119. VPxWorker *const worker = &cpi->workers[i];
  120. worker->hook = hook;
  121. worker->data1 = &cpi->tile_thr_data[i];
  122. worker->data2 = data2;
  123. }
  124. // Encode a frame
  125. for (i = 0; i < num_workers; i++) {
  126. VPxWorker *const worker = &cpi->workers[i];
  127. EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;
  128. // Set the starting tile for each thread.
  129. thread_data->start = i;
  130. if (i == cpi->num_workers - 1)
  131. winterface->execute(worker);
  132. else
  133. winterface->launch(worker);
  134. }
  135. // Encoding ends.
  136. for (i = 0; i < num_workers; i++) {
  137. VPxWorker *const worker = &cpi->workers[i];
  138. winterface->sync(worker);
  139. }
  140. }
// Encodes the current frame with tile-based multi-threading: one worker
// per tile column (capped by max_threads). Per-thread data is seeded from
// cpi->td before the workers run, and the gathered frame counts and RD
// statistics are merged back afterwards.
void vp9_encode_tiles_mt(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  // Never use more workers than there are tile columns.
  const int num_workers = VPXMIN(cpi->oxcf.max_threads, tile_cols);
  int i;

  vp9_init_tile_data(cpi);

  create_enc_workers(cpi, num_workers);

  for (i = 0; i < num_workers; i++) {
    EncWorkerData *thread_data;
    thread_data = &cpi->tile_thr_data[i];

    // Before encoding a frame, copy the thread data from cpi. The last
    // worker is the main thread (td == &cpi->td), so skip it.
    if (thread_data->td != &cpi->td) {
      thread_data->td->mb = cpi->td.mb;
      thread_data->td->rd_counts = cpi->td.rd_counts;
    }
    // Give each worker its own copy of the frame-level counts so they can
    // be updated without locking; they are merged again after encoding.
    if (thread_data->td->counts != &cpi->common.counts) {
      memcpy(thread_data->td->counts, &cpi->common.counts,
             sizeof(cpi->common.counts));
    }

    // Handle use_nonrd_pick_mode case.
    if (cpi->sf.use_nonrd_pick_mode) {
      MACROBLOCK *const x = &thread_data->td->mb;
      MACROBLOCKD *const xd = &x->e_mbd;
      struct macroblock_plane *const p = x->plane;
      struct macroblockd_plane *const pd = xd->plane;
      PICK_MODE_CONTEXT *ctx = &thread_data->td->pc_root->none;
      int j;

      // Point the per-plane coefficient buffers at this thread's own
      // pc_root context rather than at the main thread's buffers.
      for (j = 0; j < MAX_MB_PLANE; ++j) {
        p[j].coeff = ctx->coeff_pbuf[j][0];
        p[j].qcoeff = ctx->qcoeff_pbuf[j][0];
        pd[j].dqcoeff = ctx->dqcoeff_pbuf[j][0];
        p[j].eobs = ctx->eobs_pbuf[j][0];
      }
    }
  }

  launch_enc_workers(cpi, enc_worker_hook, NULL, num_workers);

  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;

    // Accumulate counters. The last worker is the main thread whose
    // counts already live in cm/cpi, so it is excluded.
    if (i < cpi->num_workers - 1) {
      vp9_accumulate_frame_counts(&cm->counts, thread_data->td->counts, 0);
      accumulate_rd_opt(&cpi->td, thread_data->td);
    }
  }
}
  187. #if !CONFIG_REALTIME_ONLY
  188. static void accumulate_fp_tile_stat(TileDataEnc *tile_data,
  189. TileDataEnc *tile_data_t) {
  190. tile_data->fp_data.intra_factor += tile_data_t->fp_data.intra_factor;
  191. tile_data->fp_data.brightness_factor +=
  192. tile_data_t->fp_data.brightness_factor;
  193. tile_data->fp_data.coded_error += tile_data_t->fp_data.coded_error;
  194. tile_data->fp_data.sr_coded_error += tile_data_t->fp_data.sr_coded_error;
  195. tile_data->fp_data.frame_noise_energy +=
  196. tile_data_t->fp_data.frame_noise_energy;
  197. tile_data->fp_data.intra_error += tile_data_t->fp_data.intra_error;
  198. tile_data->fp_data.intercount += tile_data_t->fp_data.intercount;
  199. tile_data->fp_data.second_ref_count += tile_data_t->fp_data.second_ref_count;
  200. tile_data->fp_data.neutral_count += tile_data_t->fp_data.neutral_count;
  201. tile_data->fp_data.intra_count_low += tile_data_t->fp_data.intra_count_low;
  202. tile_data->fp_data.intra_count_high += tile_data_t->fp_data.intra_count_high;
  203. tile_data->fp_data.intra_skip_count += tile_data_t->fp_data.intra_skip_count;
  204. tile_data->fp_data.mvcount += tile_data_t->fp_data.mvcount;
  205. tile_data->fp_data.sum_mvr += tile_data_t->fp_data.sum_mvr;
  206. tile_data->fp_data.sum_mvr_abs += tile_data_t->fp_data.sum_mvr_abs;
  207. tile_data->fp_data.sum_mvc += tile_data_t->fp_data.sum_mvc;
  208. tile_data->fp_data.sum_mvc_abs += tile_data_t->fp_data.sum_mvc_abs;
  209. tile_data->fp_data.sum_mvrs += tile_data_t->fp_data.sum_mvrs;
  210. tile_data->fp_data.sum_mvcs += tile_data_t->fp_data.sum_mvcs;
  211. tile_data->fp_data.sum_in_vectors += tile_data_t->fp_data.sum_in_vectors;
  212. tile_data->fp_data.intra_smooth_count +=
  213. tile_data_t->fp_data.intra_smooth_count;
  214. tile_data->fp_data.image_data_start_row =
  215. VPXMIN(tile_data->fp_data.image_data_start_row,
  216. tile_data_t->fp_data.image_data_start_row) == INVALID_ROW
  217. ? VPXMAX(tile_data->fp_data.image_data_start_row,
  218. tile_data_t->fp_data.image_data_start_row)
  219. : VPXMIN(tile_data->fp_data.image_data_start_row,
  220. tile_data_t->fp_data.image_data_start_row);
  221. }
  222. #endif // !CONFIG_REALTIME_ONLY
// Allocate memory for row synchronization: one mutex/condition-variable
// pair per row (threaded builds only) plus a per-row progress counter.
void vp9_row_mt_sync_mem_alloc(VP9RowMTSync *row_mt_sync, VP9_COMMON *cm,
                               int rows) {
  row_mt_sync->rows = rows;
#if CONFIG_MULTITHREAD
  {
    int i;

    CHECK_MEM_ERROR(cm, row_mt_sync->mutex,
                    vpx_malloc(sizeof(*row_mt_sync->mutex) * rows));
    if (row_mt_sync->mutex) {
      for (i = 0; i < rows; ++i) {
        pthread_mutex_init(&row_mt_sync->mutex[i], NULL);
      }
    }

    CHECK_MEM_ERROR(cm, row_mt_sync->cond,
                    vpx_malloc(sizeof(*row_mt_sync->cond) * rows));
    if (row_mt_sync->cond) {
      for (i = 0; i < rows; ++i) {
        pthread_cond_init(&row_mt_sync->cond[i], NULL);
      }
    }
  }
#endif  // CONFIG_MULTITHREAD

  // cur_col[r] records how far row r has been encoded; read by
  // vp9_row_mt_sync_read() and written by vp9_row_mt_sync_write().
  CHECK_MEM_ERROR(cm, row_mt_sync->cur_col,
                  vpx_malloc(sizeof(*row_mt_sync->cur_col) * rows));

  // Set up nsync.
  row_mt_sync->sync_range = 1;
}
// Deallocate row based multi-threading synchronization related mutex and data
void vp9_row_mt_sync_mem_dealloc(VP9RowMTSync *row_mt_sync) {
  if (row_mt_sync != NULL) {
#if CONFIG_MULTITHREAD
    int i;

    if (row_mt_sync->mutex != NULL) {
      for (i = 0; i < row_mt_sync->rows; ++i) {
        pthread_mutex_destroy(&row_mt_sync->mutex[i]);
      }
      vpx_free(row_mt_sync->mutex);
    }
    if (row_mt_sync->cond != NULL) {
      for (i = 0; i < row_mt_sync->rows; ++i) {
        pthread_cond_destroy(&row_mt_sync->cond[i]);
      }
      vpx_free(row_mt_sync->cond);
    }
#endif  // CONFIG_MULTITHREAD
    vpx_free(row_mt_sync->cur_col);
    // clear the structure as the source of this call may be dynamic change
    // in tiles in which case this call will be followed by an _alloc()
    // which may fail.
    vp9_zero(*row_mt_sync);
  }
}
// Blocks until row r - 1 has been encoded far enough past column c for
// row r to proceed safely. No-op for row 0 and for columns that are not
// on a sync_range boundary (the mask assumes sync_range is a power of
// two; it is set to 1 in vp9_row_mt_sync_mem_alloc).
void vp9_row_mt_sync_read(VP9RowMTSync *const row_mt_sync, int r, int c) {
#if CONFIG_MULTITHREAD
  const int nsync = row_mt_sync->sync_range;

  if (r && !(c & (nsync - 1))) {
    pthread_mutex_t *const mutex = &row_mt_sync->mutex[r - 1];
    pthread_mutex_lock(mutex);

    // Wait until the row above has advanced to at least column
    // c + nsync - 1; cur_col is updated under the same mutex in
    // vp9_row_mt_sync_write().
    while (c > row_mt_sync->cur_col[r - 1] - nsync + 1) {
      pthread_cond_wait(&row_mt_sync->cond[r - 1], mutex);
    }
    pthread_mutex_unlock(mutex);
  }
#else
  (void)row_mt_sync;
  (void)r;
  (void)c;
#endif  // CONFIG_MULTITHREAD
}
  293. void vp9_row_mt_sync_read_dummy(VP9RowMTSync *const row_mt_sync, int r, int c) {
  294. (void)row_mt_sync;
  295. (void)r;
  296. (void)c;
  297. return;
  298. }
// Publishes encoding progress on row r (last finished column c of cols)
// and wakes any thread blocked on this row in vp9_row_mt_sync_read().
void vp9_row_mt_sync_write(VP9RowMTSync *const row_mt_sync, int r, int c,
                           const int cols) {
#if CONFIG_MULTITHREAD
  const int nsync = row_mt_sync->sync_range;
  int cur;
  // Only signal when there are enough encoded blocks for next row to run.
  int sig = 1;

  if (c < cols - 1) {
    cur = c;
    if (c % nsync != nsync - 1) sig = 0;
  } else {
    // Last column of the row: push the counter past the end so readers of
    // the next row can never block on this row again.
    cur = cols + nsync;
  }

  if (sig) {
    pthread_mutex_lock(&row_mt_sync->mutex[r]);

    row_mt_sync->cur_col[r] = cur;

    pthread_cond_signal(&row_mt_sync->cond[r]);
    pthread_mutex_unlock(&row_mt_sync->mutex[r]);
  }
#else
  (void)row_mt_sync;
  (void)r;
  (void)c;
  (void)cols;
#endif  // CONFIG_MULTITHREAD
}
  325. void vp9_row_mt_sync_write_dummy(VP9RowMTSync *const row_mt_sync, int r, int c,
  326. const int cols) {
  327. (void)row_mt_sync;
  328. (void)r;
  329. (void)c;
  330. (void)cols;
  331. return;
  332. }
  333. #if !CONFIG_REALTIME_ONLY
// Worker hook for the row-based multi-threaded first pass. Each worker
// repeatedly pulls one MB-row job off the job queue of its current tile
// and runs first-pass encoding on it, switching tiles when its own queue
// runs dry, until every tile in the frame is complete.
static int first_pass_worker_hook(void *arg1, void *arg2) {
  EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  MultiThreadHandle *multi_thread_ctxt = (MultiThreadHandle *)arg2;
  VP9_COMP *const cpi = thread_data->cpi;
  const VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  int tile_row, tile_col;
  TileDataEnc *this_tile;
  int end_of_frame;
  int thread_id = thread_data->thread_id;
  int cur_tile_id = multi_thread_ctxt->thread_id_to_tile_id[thread_id];
  JobNode *proc_job = NULL;
  FIRSTPASS_DATA fp_acc_data;
  MV zero_mv = { 0, 0 };
  MV best_ref_mv;
  int mb_row;

  end_of_frame = 0;
  while (0 == end_of_frame) {
    // Get the next job in the queue
    proc_job =
        (JobNode *)vp9_enc_grp_get_next_job(multi_thread_ctxt, cur_tile_id);
    if (NULL == proc_job) {
      // Query for the status of other tiles; may retarget cur_tile_id to a
      // tile that still has pending jobs, or report end of frame.
      end_of_frame = vp9_get_tiles_proc_status(
          multi_thread_ctxt, thread_data->tile_completion_status, &cur_tile_id,
          tile_cols);
    } else {
      tile_col = proc_job->tile_col_id;
      tile_row = proc_job->tile_row_id;
      this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
      mb_row = proc_job->vert_unit_row_num;

      // Each row job starts from a zero reference MV and freshly zeroed
      // first-pass stats; image_data_start_row stays INVALID_ROW until the
      // row encounters image data.
      best_ref_mv = zero_mv;
      vp9_zero(fp_acc_data);
      fp_acc_data.image_data_start_row = INVALID_ROW;
      vp9_first_pass_encode_tile_mb_row(cpi, thread_data->td, &fp_acc_data,
                                        this_tile, &best_ref_mv, mb_row);
    }
  }
  return 0;
}
// Runs the first pass over the frame using row-based multi-threading,
// then merges the per-tile first-pass statistics into tile column 0.
void vp9_encode_fp_row_mt(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  MultiThreadHandle *multi_thread_ctxt = &cpi->multi_thread_ctxt;
  TileDataEnc *first_tile_col;
  // At least one worker (the main thread) always runs.
  int num_workers = VPXMAX(cpi->oxcf.max_threads, 1);
  int i;

  // Reallocate the row-mt buffers when the tile geometry or the number of
  // vertical units has outgrown what was previously allocated.
  if (multi_thread_ctxt->allocated_tile_cols < tile_cols ||
      multi_thread_ctxt->allocated_tile_rows < tile_rows ||
      multi_thread_ctxt->allocated_vert_unit_rows < cm->mb_rows) {
    vp9_row_mt_mem_dealloc(cpi);
    vp9_init_tile_data(cpi);
    vp9_row_mt_mem_alloc(cpi);
  } else {
    vp9_init_tile_data(cpi);
  }

  create_enc_workers(cpi, num_workers);

  vp9_assign_tile_to_thread(multi_thread_ctxt, tile_cols, cpi->num_workers);

  vp9_prepare_job_queue(cpi, FIRST_PASS_JOB);

  vp9_multi_thread_tile_init(cpi);

  for (i = 0; i < num_workers; i++) {
    EncWorkerData *thread_data;
    thread_data = &cpi->tile_thr_data[i];

    // Before encoding a frame, copy the thread data from cpi.
    if (thread_data->td != &cpi->td) {
      thread_data->td->mb = cpi->td.mb;
    }
  }

  launch_enc_workers(cpi, first_pass_worker_hook, multi_thread_ctxt,
                     num_workers);

  // Fold the first-pass stats of every other tile column into column 0.
  first_tile_col = &cpi->tile_data[0];
  for (i = 1; i < tile_cols; i++) {
    TileDataEnc *this_tile = &cpi->tile_data[i];
    accumulate_fp_tile_stat(first_tile_col, this_tile);
  }
}
// Worker hook for the row-based multi-threaded temporal (ARNR) filter.
// Pulls one row job at a time from the job queue and filters that row of
// the assigned tile until every tile in the frame is complete.
static int temporal_filter_worker_hook(void *arg1, void *arg2) {
  EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  MultiThreadHandle *multi_thread_ctxt = (MultiThreadHandle *)arg2;
  VP9_COMP *const cpi = thread_data->cpi;
  const VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  int tile_row, tile_col;
  int mb_col_start, mb_col_end;
  TileDataEnc *this_tile;
  int end_of_frame;
  int thread_id = thread_data->thread_id;
  int cur_tile_id = multi_thread_ctxt->thread_id_to_tile_id[thread_id];
  JobNode *proc_job = NULL;
  int mb_row;

  end_of_frame = 0;
  while (0 == end_of_frame) {
    // Get the next job in the queue
    proc_job =
        (JobNode *)vp9_enc_grp_get_next_job(multi_thread_ctxt, cur_tile_id);
    if (NULL == proc_job) {
      // Query for the status of other tiles; may retarget cur_tile_id to a
      // tile that still has pending jobs, or report end of frame.
      end_of_frame = vp9_get_tiles_proc_status(
          multi_thread_ctxt, thread_data->tile_completion_status, &cur_tile_id,
          tile_cols);
    } else {
      tile_col = proc_job->tile_col_id;
      tile_row = proc_job->tile_row_id;
      this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
      // Convert the tile's mi column bounds to the filter's column units;
      // TF_ROUND rounds the upper bound up before the shift.
      mb_col_start = (this_tile->tile_info.mi_col_start) >> TF_SHIFT;
      mb_col_end = (this_tile->tile_info.mi_col_end + TF_ROUND) >> TF_SHIFT;
      mb_row = proc_job->vert_unit_row_num;

      vp9_temporal_filter_iterate_row_c(cpi, thread_data->td, mb_row,
                                        mb_col_start, mb_col_end);
    }
  }
  return 0;
}
  448. void vp9_temporal_filter_row_mt(VP9_COMP *cpi) {
  449. VP9_COMMON *const cm = &cpi->common;
  450. const int tile_cols = 1 << cm->log2_tile_cols;
  451. const int tile_rows = 1 << cm->log2_tile_rows;
  452. MultiThreadHandle *multi_thread_ctxt = &cpi->multi_thread_ctxt;
  453. int num_workers = cpi->num_workers ? cpi->num_workers : 1;
  454. int i;
  455. if (multi_thread_ctxt->allocated_tile_cols < tile_cols ||
  456. multi_thread_ctxt->allocated_tile_rows < tile_rows ||
  457. multi_thread_ctxt->allocated_vert_unit_rows < cm->mb_rows) {
  458. vp9_row_mt_mem_dealloc(cpi);
  459. vp9_init_tile_data(cpi);
  460. vp9_row_mt_mem_alloc(cpi);
  461. } else {
  462. vp9_init_tile_data(cpi);
  463. }
  464. create_enc_workers(cpi, num_workers);
  465. vp9_assign_tile_to_thread(multi_thread_ctxt, tile_cols, cpi->num_workers);
  466. vp9_prepare_job_queue(cpi, ARNR_JOB);
  467. for (i = 0; i < num_workers; i++) {
  468. EncWorkerData *thread_data;
  469. thread_data = &cpi->tile_thr_data[i];
  470. // Before encoding a frame, copy the thread data from cpi.
  471. if (thread_data->td != &cpi->td) {
  472. thread_data->td->mb = cpi->td.mb;
  473. }
  474. }
  475. launch_enc_workers(cpi, temporal_filter_worker_hook, multi_thread_ctxt,
  476. num_workers);
  477. }
  478. #endif // !CONFIG_REALTIME_ONLY
  479. static int enc_row_mt_worker_hook(void *arg1, void *arg2) {
  480. EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  481. MultiThreadHandle *multi_thread_ctxt = (MultiThreadHandle *)arg2;
  482. VP9_COMP *const cpi = thread_data->cpi;
  483. const VP9_COMMON *const cm = &cpi->common;
  484. const int tile_cols = 1 << cm->log2_tile_cols;
  485. int tile_row, tile_col;
  486. int end_of_frame;
  487. int thread_id = thread_data->thread_id;
  488. int cur_tile_id = multi_thread_ctxt->thread_id_to_tile_id[thread_id];
  489. JobNode *proc_job = NULL;
  490. int mi_row;
  491. end_of_frame = 0;
  492. while (0 == end_of_frame) {
  493. // Get the next job in the queue
  494. proc_job =
  495. (JobNode *)vp9_enc_grp_get_next_job(multi_thread_ctxt, cur_tile_id);
  496. if (NULL == proc_job) {
  497. // Query for the status of other tiles
  498. end_of_frame = vp9_get_tiles_proc_status(
  499. multi_thread_ctxt, thread_data->tile_completion_status, &cur_tile_id,
  500. tile_cols);
  501. } else {
  502. tile_col = proc_job->tile_col_id;
  503. tile_row = proc_job->tile_row_id;
  504. mi_row = proc_job->vert_unit_row_num * MI_BLOCK_SIZE;
  505. vp9_encode_sb_row(cpi, thread_data->td, tile_row, tile_col, mi_row);
  506. }
  507. }
  508. return 0;
  509. }
// Encodes the current frame with row-based multi-threading: superblock
// rows are distributed to workers through a job queue. Per-thread data is
// seeded from cpi->td before the workers run, and the gathered frame
// counts and RD statistics are merged back afterwards.
void vp9_encode_tiles_row_mt(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  MultiThreadHandle *multi_thread_ctxt = &cpi->multi_thread_ctxt;
  // At least one worker (the main thread) always runs.
  int num_workers = VPXMAX(cpi->oxcf.max_threads, 1);
  int i;

  // Reallocate the row-mt buffers when the tile geometry or the number of
  // vertical units has outgrown what was previously allocated.
  if (multi_thread_ctxt->allocated_tile_cols < tile_cols ||
      multi_thread_ctxt->allocated_tile_rows < tile_rows ||
      multi_thread_ctxt->allocated_vert_unit_rows < cm->mb_rows) {
    vp9_row_mt_mem_dealloc(cpi);
    vp9_init_tile_data(cpi);
    vp9_row_mt_mem_alloc(cpi);
  } else {
    vp9_init_tile_data(cpi);
  }

  create_enc_workers(cpi, num_workers);

  vp9_assign_tile_to_thread(multi_thread_ctxt, tile_cols, cpi->num_workers);

  vp9_prepare_job_queue(cpi, ENCODE_JOB);

  vp9_multi_thread_tile_init(cpi);

  for (i = 0; i < num_workers; i++) {
    EncWorkerData *thread_data;
    thread_data = &cpi->tile_thr_data[i];

    // Before encoding a frame, copy the thread data from cpi. The last
    // worker is the main thread (td == &cpi->td), so skip it.
    if (thread_data->td != &cpi->td) {
      thread_data->td->mb = cpi->td.mb;
      thread_data->td->rd_counts = cpi->td.rd_counts;
    }
    // Give each worker its own copy of the frame-level counts so they can
    // be updated without locking; they are merged again after encoding.
    if (thread_data->td->counts != &cpi->common.counts) {
      memcpy(thread_data->td->counts, &cpi->common.counts,
             sizeof(cpi->common.counts));
    }

    // Handle use_nonrd_pick_mode case.
    if (cpi->sf.use_nonrd_pick_mode) {
      MACROBLOCK *const x = &thread_data->td->mb;
      MACROBLOCKD *const xd = &x->e_mbd;
      struct macroblock_plane *const p = x->plane;
      struct macroblockd_plane *const pd = xd->plane;
      PICK_MODE_CONTEXT *ctx = &thread_data->td->pc_root->none;
      int j;

      // Point the per-plane coefficient buffers at this thread's own
      // pc_root context rather than at the main thread's buffers.
      for (j = 0; j < MAX_MB_PLANE; ++j) {
        p[j].coeff = ctx->coeff_pbuf[j][0];
        p[j].qcoeff = ctx->qcoeff_pbuf[j][0];
        pd[j].dqcoeff = ctx->dqcoeff_pbuf[j][0];
        p[j].eobs = ctx->eobs_pbuf[j][0];
      }
    }
  }

  launch_enc_workers(cpi, enc_row_mt_worker_hook, multi_thread_ctxt,
                     num_workers);

  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;

    // Accumulate counters. The last worker is the main thread whose
    // counts already live in cm/cpi, so it is excluded.
    if (i < cpi->num_workers - 1) {
      vp9_accumulate_frame_counts(&cm->counts, thread_data->td->counts, 0);
      accumulate_rd_opt(&cpi->td, thread_data->td);
    }
  }
}