vp9_denoiser.c

/*
 * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <limits.h>
#include <math.h>
#include <string.h>  // memcpy(), used by copy_frame() below.

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_scale/yv12config.h"
#include "vpx/vpx_integer.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/encoder/vp9_context_tree.h"
#include "vp9/encoder/vp9_denoiser.h"
#include "vp9/encoder/vp9_encoder.h"

#ifdef OUTPUT_YUV_DENOISED
static void make_grayscale(YV12_BUFFER_CONFIG *yuv);
#endif
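
// Overview: vp9_denoiser_alloc() sets up one denoised running-average buffer
// per reference frame, a motion-compensated scratch buffer, and a copy of the
// last source frame; vp9_denoiser_denoise() is called per block to decide
// between filtering and copying; vp9_denoiser_update_frame_info() keeps the
// running averages in sync with reference-frame refreshes; vp9_denoiser_free()
// releases the buffers.
//
// The static helpers below return the thresholds that drive those decisions.
// Most scale with the number of pixels in the block,
// (1 << num_pels_log2_lookup[bs]).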
static int absdiff_thresh(BLOCK_SIZE bs, int increase_denoising) {
  (void)bs;
  return 3 + (increase_denoising ? 1 : 0);
}

static int delta_thresh(BLOCK_SIZE bs, int increase_denoising) {
  (void)bs;
  (void)increase_denoising;
  return 4;
}

static int noise_motion_thresh(BLOCK_SIZE bs, int increase_denoising) {
  (void)bs;
  (void)increase_denoising;
  return 625;
}

static unsigned int sse_thresh(BLOCK_SIZE bs, int increase_denoising) {
  return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 80 : 40);
}

static int sse_diff_thresh(BLOCK_SIZE bs, int increase_denoising,
                           int motion_magnitude) {
  if (motion_magnitude > noise_motion_thresh(bs, increase_denoising)) {
    if (increase_denoising)
      return (1 << num_pels_log2_lookup[bs]) << 2;
    else
      return 0;
  } else {
    return (1 << num_pels_log2_lookup[bs]) << 4;
  }
}

static int total_adj_weak_thresh(BLOCK_SIZE bs, int increase_denoising) {
  return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 3 : 2);
}
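
// C reference version of the block-level denoising filter; the TODO below
// refers to its SSE2 counterpart. A strong temporal filter first pulls each
// pixel of 'sig' toward the motion-compensated average 'mc_avg', writing the
// result to 'avg' and accumulating the total adjustment. If that total stays
// under the strong threshold the filtered block is kept; otherwise a second,
// dampened pass partially undoes the adjustment, and if the block has still
// changed too much the caller is told to copy it instead (COPY_BLOCK).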
// TODO(jackychen): If increase_denoising is enabled in the future,
// we might need to update the code for calculating 'total_adj' in
// case the C code is not bit-exact with corresponding sse2 code.
int vp9_denoiser_filter_c(const uint8_t *sig, int sig_stride,
                          const uint8_t *mc_avg, int mc_avg_stride,
                          uint8_t *avg, int avg_stride, int increase_denoising,
                          BLOCK_SIZE bs, int motion_magnitude) {
  int r, c;
  const uint8_t *sig_start = sig;
  const uint8_t *mc_avg_start = mc_avg;
  uint8_t *avg_start = avg;
  int diff, adj, absdiff, delta;
  int adj_val[] = { 3, 4, 6 };
  int total_adj = 0;
  int shift_inc = 1;

  // If motion_magnitude is small, make the denoiser more aggressive by
  // increasing the adjustment for each level. Add another increment for
  // blocks that are flagged for increased denoising.
  if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) {
    if (increase_denoising) {
      shift_inc = 2;
    }
    adj_val[0] += shift_inc;
    adj_val[1] += shift_inc;
    adj_val[2] += shift_inc;
  }

  // First attempt to apply a strong temporal denoising filter.
  for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) {
    for (c = 0; c < (4 << b_width_log2_lookup[bs]); ++c) {
      diff = mc_avg[c] - sig[c];
      absdiff = abs(diff);

      if (absdiff <= absdiff_thresh(bs, increase_denoising)) {
        avg[c] = mc_avg[c];
        total_adj += diff;
      } else {
        switch (absdiff) {
          case 4:
          case 5:
          case 6:
          case 7: adj = adj_val[0]; break;
          case 8:
          case 9:
          case 10:
          case 11:
          case 12:
          case 13:
          case 14:
          case 15: adj = adj_val[1]; break;
          default: adj = adj_val[2];
        }
        if (diff > 0) {
          avg[c] = VPXMIN(UINT8_MAX, sig[c] + adj);
          total_adj += adj;
        } else {
          avg[c] = VPXMAX(0, sig[c] - adj);
          total_adj -= adj;
        }
      }
    }
    sig += sig_stride;
    avg += avg_stride;
    mc_avg += mc_avg_stride;
  }

  // If the strong filter did not modify the signal too much, we're all set.
  if (abs(total_adj) <= total_adj_strong_thresh(bs, increase_denoising)) {
    return FILTER_BLOCK;
  }

  // Otherwise, we try to dampen the filter if the delta is not too high.
  delta = ((abs(total_adj) - total_adj_strong_thresh(bs, increase_denoising)) >>
           num_pels_log2_lookup[bs]) +
          1;

  if (delta >= delta_thresh(bs, increase_denoising)) {
    return COPY_BLOCK;
  }

  mc_avg = mc_avg_start;
  avg = avg_start;
  sig = sig_start;
  for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) {
    for (c = 0; c < (4 << b_width_log2_lookup[bs]); ++c) {
      diff = mc_avg[c] - sig[c];
      adj = abs(diff);
      if (adj > delta) {
        adj = delta;
      }
      if (diff > 0) {
        // Diff positive means we made a positive adjustment above
        // (in the first attempt), so now make a negative adjustment to bring
        // the denoised signal down.
        avg[c] = VPXMAX(0, avg[c] - adj);
        total_adj -= adj;
      } else {
        // Diff negative means we made a negative adjustment above
        // (in the first attempt), so now make a positive adjustment to bring
        // the denoised signal up.
        avg[c] = VPXMIN(UINT8_MAX, avg[c] + adj);
        total_adj += adj;
      }
    }
    sig += sig_stride;
    avg += avg_stride;
    mc_avg += mc_avg_stride;
  }

  // We can use the filter if it has been sufficiently dampened.
  if (abs(total_adj) <= total_adj_weak_thresh(bs, increase_denoising)) {
    return FILTER_BLOCK;
  }
  return COPY_BLOCK;
}
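
// Return a pointer to the top-left pixel of the block at (mi_row, mi_col) in
// 'framebuf'. The position is given in mode-info units, which are 8x8 pixels
// in VP9, hence the shifts by 3.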
static uint8_t *block_start(uint8_t *framebuf, int stride, int mi_row,
                            int mi_col) {
  return framebuf + (stride * mi_row << 3) + (mi_col << 3);
}
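
// Decide which reference frame and motion vector to denoise with, then build
// the motion-compensated prediction of this block from that reference's
// denoised running average into denoiser->mc_running_avg_y. The best inter
// mode from the mode search is used when its SSE is enough of an improvement
// over zero motion; otherwise ZEROMV is used, with a bias toward LAST_FRAME.
// The MACROBLOCKD pre/dst buffer pointers are temporarily redirected to the
// denoiser buffers and restored before returning. Returns COPY_BLOCK when the
// block should not be denoised at all.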
static VP9_DENOISER_DECISION perform_motion_compensation(
    VP9_DENOISER *denoiser, MACROBLOCK *mb, BLOCK_SIZE bs,
    int increase_denoising, int mi_row, int mi_col, PICK_MODE_CONTEXT *ctx,
    int motion_magnitude, int is_skin, int *zeromv_filter, int consec_zeromv) {
  int sse_diff = ctx->zeromv_sse - ctx->newmv_sse;
  MV_REFERENCE_FRAME frame;
  MACROBLOCKD *filter_mbd = &mb->e_mbd;
  MODE_INFO *mi = filter_mbd->mi[0];
  MODE_INFO saved_mi;
  int i;
  struct buf_2d saved_dst[MAX_MB_PLANE];
  struct buf_2d saved_pre[MAX_MB_PLANE];

  frame = ctx->best_reference_frame;
  saved_mi = *mi;

  if (is_skin && (motion_magnitude > 0 || consec_zeromv < 4)) return COPY_BLOCK;

  // Avoid denoising small blocks (unless motion is small).
  // Small blocks are selected in variance partition (before encoding) and
  // will typically lie on moving areas.
  if (denoiser->denoising_level < kDenHigh && motion_magnitude > 16 &&
      bs <= BLOCK_8X8)
    return COPY_BLOCK;

  // If the best reference frame uses inter-prediction and there is enough of a
  // difference in sum-squared-error, use it.
  if (frame != INTRA_FRAME && ctx->newmv_sse != UINT_MAX &&
      sse_diff > sse_diff_thresh(bs, increase_denoising, motion_magnitude)) {
    mi->ref_frame[0] = ctx->best_reference_frame;
    mi->mode = ctx->best_sse_inter_mode;
    mi->mv[0] = ctx->best_sse_mv;
  } else {
    // Otherwise, use the zero reference frame.
    frame = ctx->best_zeromv_reference_frame;
    ctx->newmv_sse = ctx->zeromv_sse;
    // Bias to last reference.
    if (frame != LAST_FRAME &&
        (ctx->zeromv_lastref_sse < ((5 * ctx->zeromv_sse) >> 2) ||
         denoiser->denoising_level >= kDenHigh)) {
      frame = LAST_FRAME;
      ctx->newmv_sse = ctx->zeromv_lastref_sse;
    }
    mi->ref_frame[0] = frame;
    mi->mode = ZEROMV;
    mi->mv[0].as_int = 0;
    ctx->best_sse_inter_mode = ZEROMV;
    ctx->best_sse_mv.as_int = 0;
    *zeromv_filter = 1;
    if (denoiser->denoising_level > kDenMedium) {
      motion_magnitude = 0;
    }
  }

  if (ctx->newmv_sse > sse_thresh(bs, increase_denoising)) {
    // Restore everything to its original state.
    *mi = saved_mi;
    return COPY_BLOCK;
  }
  if (motion_magnitude > (noise_motion_thresh(bs, increase_denoising) << 3)) {
    // Restore everything to its original state.
    *mi = saved_mi;
    return COPY_BLOCK;
  }

  // We will restore these after motion compensation.
  for (i = 0; i < MAX_MB_PLANE; ++i) {
    saved_pre[i] = filter_mbd->plane[i].pre[0];
    saved_dst[i] = filter_mbd->plane[i].dst;
  }

  // Set the pointers in the MACROBLOCKD to point to the buffers in the
  // denoiser struct.
  filter_mbd->plane[0].pre[0].buf =
      block_start(denoiser->running_avg_y[frame].y_buffer,
                  denoiser->running_avg_y[frame].y_stride, mi_row, mi_col);
  filter_mbd->plane[0].pre[0].stride = denoiser->running_avg_y[frame].y_stride;
  filter_mbd->plane[1].pre[0].buf =
      block_start(denoiser->running_avg_y[frame].u_buffer,
                  denoiser->running_avg_y[frame].uv_stride, mi_row, mi_col);
  filter_mbd->plane[1].pre[0].stride = denoiser->running_avg_y[frame].uv_stride;
  filter_mbd->plane[2].pre[0].buf =
      block_start(denoiser->running_avg_y[frame].v_buffer,
                  denoiser->running_avg_y[frame].uv_stride, mi_row, mi_col);
  filter_mbd->plane[2].pre[0].stride = denoiser->running_avg_y[frame].uv_stride;

  filter_mbd->plane[0].dst.buf =
      block_start(denoiser->mc_running_avg_y.y_buffer,
                  denoiser->mc_running_avg_y.y_stride, mi_row, mi_col);
  filter_mbd->plane[0].dst.stride = denoiser->mc_running_avg_y.y_stride;
  filter_mbd->plane[1].dst.buf =
      block_start(denoiser->mc_running_avg_y.u_buffer,
                  denoiser->mc_running_avg_y.uv_stride, mi_row, mi_col);
  filter_mbd->plane[1].dst.stride = denoiser->mc_running_avg_y.uv_stride;
  filter_mbd->plane[2].dst.buf =
      block_start(denoiser->mc_running_avg_y.v_buffer,
                  denoiser->mc_running_avg_y.uv_stride, mi_row, mi_col);
  filter_mbd->plane[2].dst.stride = denoiser->mc_running_avg_y.uv_stride;

  vp9_build_inter_predictors_sby(filter_mbd, mi_row, mi_col, bs);

  // Restore everything to its original state.
  *mi = saved_mi;
  for (i = 0; i < MAX_MB_PLANE; ++i) {
    filter_mbd->plane[i].pre[0] = saved_pre[i];
    filter_mbd->plane[i].dst = saved_dst[i];
  }

  return FILTER_BLOCK;
}
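
// Per-block entry point for denoising: computes the motion magnitude from the
// best-SSE motion vector, optionally runs skin detection to back off
// denoising on skin blocks, motion-compensates the running average, applies
// vp9_denoiser_filter(), and finally copies either the denoised result or the
// original source into the INTRA_FRAME running-average buffer. The decision
// is reported through *denoiser_decision.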
void vp9_denoiser_denoise(VP9_COMP *cpi, MACROBLOCK *mb, int mi_row, int mi_col,
                          BLOCK_SIZE bs, PICK_MODE_CONTEXT *ctx,
                          VP9_DENOISER_DECISION *denoiser_decision) {
  int mv_col, mv_row;
  int motion_magnitude = 0;
  int zeromv_filter = 0;
  VP9_DENOISER *denoiser = &cpi->denoiser;
  VP9_DENOISER_DECISION decision = COPY_BLOCK;
  YV12_BUFFER_CONFIG avg = denoiser->running_avg_y[INTRA_FRAME];
  YV12_BUFFER_CONFIG mc_avg = denoiser->mc_running_avg_y;
  uint8_t *avg_start = block_start(avg.y_buffer, avg.y_stride, mi_row, mi_col);
  uint8_t *mc_avg_start =
      block_start(mc_avg.y_buffer, mc_avg.y_stride, mi_row, mi_col);
  struct buf_2d src = mb->plane[0].src;
  int is_skin = 0;
  int consec_zeromv = 0;

  mv_col = ctx->best_sse_mv.as_mv.col;
  mv_row = ctx->best_sse_mv.as_mv.row;
  motion_magnitude = mv_row * mv_row + mv_col * mv_col;

  if (cpi->use_skin_detection && bs <= BLOCK_32X32 &&
      denoiser->denoising_level < kDenHigh) {
    int motion_level = (motion_magnitude < 16) ? 0 : 1;
    // If motion for the current block is small/zero, compute consec_zeromv for
    // skin detection (early exit in skin detection is done for large
    // consec_zeromv when the current block has small/zero motion).
    consec_zeromv = 0;
    if (motion_level == 0) {
      VP9_COMMON *const cm = &cpi->common;
      int j, i;
      // Loop through the 8x8 sub-blocks.
      const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64];
      const int bh = num_8x8_blocks_high_lookup[BLOCK_64X64];
      const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
      const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
      const int block_index = mi_row * cm->mi_cols + mi_col;
      consec_zeromv = 100;
      for (i = 0; i < ymis; i++) {
        for (j = 0; j < xmis; j++) {
          int bl_index = block_index + i * cm->mi_cols + j;
          consec_zeromv = VPXMIN(cpi->consec_zero_mv[bl_index], consec_zeromv);
          // No need to keep checking 8x8 blocks if any of the sub-blocks
          // has small consec_zeromv (since the threshold for no_skin based on
          // zero/small motion in skin detection is high, i.e., > 4).
          if (consec_zeromv < 4) {
            i = ymis;
            j = xmis;
          }
        }
      }
    }
    // TODO(marpan): Compute skin detection over sub-blocks.
    is_skin = vp9_compute_skin_block(
        mb->plane[0].src.buf, mb->plane[1].src.buf, mb->plane[2].src.buf,
        mb->plane[0].src.stride, mb->plane[1].src.stride, bs, consec_zeromv,
        motion_level);
  }

  if (!is_skin && denoiser->denoising_level == kDenHigh) {
    denoiser->increase_denoising = 1;
  } else {
    denoiser->increase_denoising = 0;
  }

  if (denoiser->denoising_level >= kDenLow)
    decision = perform_motion_compensation(
        denoiser, mb, bs, denoiser->increase_denoising, mi_row, mi_col, ctx,
        motion_magnitude, is_skin, &zeromv_filter, consec_zeromv);

  if (decision == FILTER_BLOCK) {
    decision = vp9_denoiser_filter(
        src.buf, src.stride, mc_avg_start, mc_avg.y_stride, avg_start,
        avg.y_stride, denoiser->increase_denoising, bs, motion_magnitude);
  }

  if (decision == FILTER_BLOCK) {
    vpx_convolve_copy(avg_start, avg.y_stride, src.buf, src.stride, NULL, 0,
                      NULL, 0, num_4x4_blocks_wide_lookup[bs] << 2,
                      num_4x4_blocks_high_lookup[bs] << 2);
  } else {  // COPY_BLOCK
    vpx_convolve_copy(src.buf, src.stride, avg_start, avg.y_stride, NULL, 0,
                      NULL, 0, num_4x4_blocks_wide_lookup[bs] << 2,
                      num_4x4_blocks_high_lookup[bs] << 2);
  }
  *denoiser_decision = decision;
  if (decision == FILTER_BLOCK && zeromv_filter == 1)
    *denoiser_decision = FILTER_ZEROMV_BLOCK;
}
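
// Copy the luma plane of 'src' into 'dest' row by row; only luma is denoised,
// so the chroma planes are left untouched.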
static void copy_frame(YV12_BUFFER_CONFIG *const dest,
                       const YV12_BUFFER_CONFIG *const src) {
  int r;
  const uint8_t *srcbuf = src->y_buffer;
  uint8_t *destbuf = dest->y_buffer;

  assert(dest->y_width == src->y_width);
  assert(dest->y_height == src->y_height);

  for (r = 0; r < dest->y_height; ++r) {
    memcpy(destbuf, srcbuf, dest->y_width);
    destbuf += dest->y_stride;
    srcbuf += src->y_stride;
  }
}
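
// Swap the luma buffer pointers of the two frames instead of copying; used by
// vp9_denoiser_update_frame_info() when only a single reference is refreshed.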
static void swap_frame_buffer(YV12_BUFFER_CONFIG *const dest,
                              YV12_BUFFER_CONFIG *const src) {
  uint8_t *tmp_buf = dest->y_buffer;
  assert(dest->y_width == src->y_width);
  assert(dest->y_height == src->y_height);
  dest->y_buffer = src->y_buffer;
  src->y_buffer = tmp_buf;
}
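
// Keep the denoised reference buffers consistent with the reference-frame
// refreshes performed for the frame that was just encoded.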
void vp9_denoiser_update_frame_info(VP9_DENOISER *denoiser,
                                    YV12_BUFFER_CONFIG src,
                                    FRAME_TYPE frame_type,
                                    int refresh_alt_ref_frame,
                                    int refresh_golden_frame,
                                    int refresh_last_frame, int resized) {
  // Copy the source into the denoised reference buffers on KEY_FRAME, if the
  // just-encoded frame was resized, or if the denoiser has been reset.
  if (frame_type == KEY_FRAME || resized != 0 || denoiser->reset) {
    int i;
    // Start at 1 so as not to overwrite the INTRA_FRAME buffer.
    for (i = 1; i < MAX_REF_FRAMES; ++i)
      copy_frame(&denoiser->running_avg_y[i], &src);
    denoiser->reset = 0;
    return;
  }

  // If more than one refresh occurs, must copy frame buffer.
  if ((refresh_alt_ref_frame + refresh_golden_frame + refresh_last_frame) > 1) {
    if (refresh_alt_ref_frame) {
      copy_frame(&denoiser->running_avg_y[ALTREF_FRAME],
                 &denoiser->running_avg_y[INTRA_FRAME]);
    }
    if (refresh_golden_frame) {
      copy_frame(&denoiser->running_avg_y[GOLDEN_FRAME],
                 &denoiser->running_avg_y[INTRA_FRAME]);
    }
    if (refresh_last_frame) {
      copy_frame(&denoiser->running_avg_y[LAST_FRAME],
                 &denoiser->running_avg_y[INTRA_FRAME]);
    }
  } else {
    if (refresh_alt_ref_frame) {
      swap_frame_buffer(&denoiser->running_avg_y[ALTREF_FRAME],
                        &denoiser->running_avg_y[INTRA_FRAME]);
    }
    if (refresh_golden_frame) {
      swap_frame_buffer(&denoiser->running_avg_y[GOLDEN_FRAME],
                        &denoiser->running_avg_y[INTRA_FRAME]);
    }
    if (refresh_last_frame) {
      swap_frame_buffer(&denoiser->running_avg_y[LAST_FRAME],
                        &denoiser->running_avg_y[INTRA_FRAME]);
    }
  }
}
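
// Reset the SSE statistics gathered during mode search before the next block
// is processed.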
void vp9_denoiser_reset_frame_stats(PICK_MODE_CONTEXT *ctx) {
  ctx->zeromv_sse = UINT_MAX;
  ctx->newmv_sse = UINT_MAX;
  ctx->zeromv_lastref_sse = UINT_MAX;
  ctx->best_sse_mv.as_int = 0;
}
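
// Track the lowest-SSE zero-MV and non-zero-MV candidates seen during mode
// search; these feed the reference/MV selection in
// perform_motion_compensation().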
void vp9_denoiser_update_frame_stats(MODE_INFO *mi, unsigned int sse,
                                     PREDICTION_MODE mode,
                                     PICK_MODE_CONTEXT *ctx) {
  if (mi->mv[0].as_int == 0 && sse < ctx->zeromv_sse) {
    ctx->zeromv_sse = sse;
    ctx->best_zeromv_reference_frame = mi->ref_frame[0];
    if (mi->ref_frame[0] == LAST_FRAME) ctx->zeromv_lastref_sse = sse;
  }

  if (mi->mv[0].as_int != 0 && sse < ctx->newmv_sse) {
    ctx->newmv_sse = sse;
    ctx->best_sse_inter_mode = mode;
    ctx->best_sse_mv = mi->mv[0];
    ctx->best_reference_frame = mi->ref_frame[0];
  }
}
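
// Allocate the denoiser frame buffers: one running average per reference
// frame slot, the motion-compensated scratch buffer, and a copy of the last
// source frame. Returns 0 on success; on failure all buffers are freed and 1
// is returned.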
int vp9_denoiser_alloc(VP9_DENOISER *denoiser, int width, int height, int ssx,
                       int ssy,
#if CONFIG_VP9_HIGHBITDEPTH
                       int use_highbitdepth,
#endif
                       int border) {
  int i, fail;
  const int legacy_byte_alignment = 0;
  assert(denoiser != NULL);

  for (i = 0; i < MAX_REF_FRAMES; ++i) {
    fail = vpx_alloc_frame_buffer(&denoiser->running_avg_y[i], width, height,
                                  ssx, ssy,
#if CONFIG_VP9_HIGHBITDEPTH
                                  use_highbitdepth,
#endif
                                  border, legacy_byte_alignment);
    if (fail) {
      vp9_denoiser_free(denoiser);
      return 1;
    }
#ifdef OUTPUT_YUV_DENOISED
    make_grayscale(&denoiser->running_avg_y[i]);
#endif
  }

  fail = vpx_alloc_frame_buffer(&denoiser->mc_running_avg_y, width, height, ssx,
                                ssy,
#if CONFIG_VP9_HIGHBITDEPTH
                                use_highbitdepth,
#endif
                                border, legacy_byte_alignment);
  if (fail) {
    vp9_denoiser_free(denoiser);
    return 1;
  }

  fail = vpx_alloc_frame_buffer(&denoiser->last_source, width, height, ssx, ssy,
#if CONFIG_VP9_HIGHBITDEPTH
                                use_highbitdepth,
#endif
                                border, legacy_byte_alignment);
  if (fail) {
    vp9_denoiser_free(denoiser);
    return 1;
  }
#ifdef OUTPUT_YUV_DENOISED
  // Grayscale the motion-compensated buffer as well; i equals MAX_REF_FRAMES
  // here, so indexing running_avg_y[i] would be out of bounds.
  make_grayscale(&denoiser->mc_running_avg_y);
#endif
  denoiser->increase_denoising = 0;
  denoiser->frame_buffer_initialized = 1;
  denoiser->denoising_level = kDenLow;
  denoiser->prev_denoising_level = kDenLow;
  denoiser->reset = 0;
  return 0;
}
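
// Release all denoiser frame buffers. Also called from vp9_denoiser_alloc()
// when an allocation fails.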
void vp9_denoiser_free(VP9_DENOISER *denoiser) {
  int i;
  if (denoiser == NULL) {
    return;
  }
  denoiser->frame_buffer_initialized = 0;
  for (i = 0; i < MAX_REF_FRAMES; ++i) {
    vpx_free_frame_buffer(&denoiser->running_avg_y[i]);
  }
  vpx_free_frame_buffer(&denoiser->mc_running_avg_y);
  vpx_free_frame_buffer(&denoiser->last_source);
}
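
// Update the denoising level from the encoder's noise estimate. Stepping up
// from kDenLowLow (the lowest level, at which blocks are not filtered; see
// vp9_denoiser_denoise()) schedules a reset so the running averages are
// re-seeded from the next encoded frame in vp9_denoiser_update_frame_info().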
void vp9_denoiser_set_noise_level(VP9_DENOISER *denoiser, int noise_level) {
  denoiser->denoising_level = noise_level;
  if (denoiser->denoising_level > kDenLowLow &&
      denoiser->prev_denoising_level == kDenLowLow)
    denoiser->reset = 1;
  else
    denoiser->reset = 0;
  denoiser->prev_denoising_level = denoiser->denoising_level;
}
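
// Debug-only helper (OUTPUT_YUV_DENOISED): set both chroma planes to mid-gray
// so dumped denoised frames show only the luma result.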
#ifdef OUTPUT_YUV_DENOISED
static void make_grayscale(YV12_BUFFER_CONFIG *yuv) {
  int r, c;
  uint8_t *u = yuv->u_buffer;
  uint8_t *v = yuv->v_buffer;

  for (r = 0; r < yuv->uv_height; ++r) {
    for (c = 0; c < yuv->uv_width; ++c) {
      u[c] = UINT8_MAX / 2;
      v[c] = UINT8_MAX / 2;
    }
    u += yuv->uv_stride;
    v += yuv->uv_stride;
  }
}
#endif