/* denoising_neon.c */
/*
 *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
#include "vp8/encoder/denoising.h"
#include "vpx_mem/vpx_mem.h"
#include "./vp8_rtcd.h"
/*
 * The filter function was modified to reduce the computational complexity.
 *
 * Step 1:
 *  Instead of applying tap coefficients for each pixel, we calculated the
 *  pixel adjustments vs. pixel diff value ahead of time.
 *     adjustment = filtered_value - current_raw
 *                = (filter_coefficient * diff + 128) >> 8
 *  where
 *     filter_coefficient = (255 << 8) / (256 + ((abs_diff * 330) >> 3));
 *     filter_coefficient += filter_coefficient /
 *                           (3 + motion_magnitude_adjustment);
 *     filter_coefficient is clamped to 0 ~ 255.
 *
 * Step 2:
 *  The adjustment vs. diff curve becomes flat very quick when diff increases.
 *  This allowed us to use only several levels to approximate the curve without
 *  changing the filtering algorithm too much.
 *  The adjustments were further corrected by checking the motion magnitude.
 *  The levels used are:
 *      diff          level       adjustment w/o       adjustment w/
 *                                motion correction    motion correction
 *      [-255, -16]     3              -6                  -7
 *      [-15, -8]       2              -4                  -5
 *      [-7, -4]        1              -3                  -4
 *      [-3, 3]         0              diff                diff
 *      [4, 7]          1               3                   4
 *      [8, 15]         2               4                   5
 *      [16, 255]       3               6                   7
 */
  44. int vp8_denoiser_filter_neon(unsigned char *mc_running_avg_y,
  45. int mc_running_avg_y_stride,
  46. unsigned char *running_avg_y,
  47. int running_avg_y_stride, unsigned char *sig,
  48. int sig_stride, unsigned int motion_magnitude,
  49. int increase_denoising) {
  50. /* If motion_magnitude is small, making the denoiser more aggressive by
  51. * increasing the adjustment for each level, level1 adjustment is
  52. * increased, the deltas stay the same.
  53. */
  54. int shift_inc =
  55. (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
  56. ? 1
  57. : 0;
  58. const uint8x16_t v_level1_adjustment = vmovq_n_u8(
  59. (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 4 + shift_inc : 3);
  60. const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
  61. const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
  62. const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc);
  63. const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
  64. const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
  65. int64x2_t v_sum_diff_total = vdupq_n_s64(0);
  66. /* Go over lines. */
  67. int r;
  68. for (r = 0; r < 16; ++r) {
  69. /* Load inputs. */
  70. const uint8x16_t v_sig = vld1q_u8(sig);
  71. const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);
  72. /* Calculate absolute difference and sign masks. */
  73. const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
  74. const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
  75. const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);
  76. /* Figure out which level that put us in. */
  77. const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff);
  78. const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff);
  79. const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff);
  80. /* Calculate absolute adjustments for level 1, 2 and 3. */
  81. const uint8x16_t v_level2_adjustment =
  82. vandq_u8(v_level2_mask, v_delta_level_1_and_2);
  83. const uint8x16_t v_level3_adjustment =
  84. vandq_u8(v_level3_mask, v_delta_level_2_and_3);
  85. const uint8x16_t v_level1and2_adjustment =
  86. vaddq_u8(v_level1_adjustment, v_level2_adjustment);
  87. const uint8x16_t v_level1and2and3_adjustment =
  88. vaddq_u8(v_level1and2_adjustment, v_level3_adjustment);
  89. /* Figure adjustment absolute value by selecting between the absolute
  90. * difference if in level0 or the value for level 1, 2 and 3.
  91. */
  92. const uint8x16_t v_abs_adjustment =
  93. vbslq_u8(v_level1_mask, v_level1and2and3_adjustment, v_abs_diff);
  94. /* Calculate positive and negative adjustments. Apply them to the signal
  95. * and accumulate them. Adjustments are less than eight and the maximum
  96. * sum of them (7 * 16) can fit in a signed char.
  97. */
  98. const uint8x16_t v_pos_adjustment =
  99. vandq_u8(v_diff_pos_mask, v_abs_adjustment);
  100. const uint8x16_t v_neg_adjustment =
  101. vandq_u8(v_diff_neg_mask, v_abs_adjustment);
  102. uint8x16_t v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment);
  103. v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment);
  104. /* Store results. */
  105. vst1q_u8(running_avg_y, v_running_avg_y);
  106. /* Sum all the accumulators to have the sum of all pixel differences
  107. * for this macroblock.
  108. */
  109. {
  110. const int8x16_t v_sum_diff =
  111. vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment),
  112. vreinterpretq_s8_u8(v_neg_adjustment));
  113. const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
  114. const int32x4_t fedc_ba98_7654_3210 =
  115. vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
  116. const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210);
  117. v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
  118. }
  119. /* Update pointers for next iteration. */
  120. sig += sig_stride;
  121. mc_running_avg_y += mc_running_avg_y_stride;
  122. running_avg_y += running_avg_y_stride;
  123. }
  124. /* Too much adjustments => copy block. */
  125. {
  126. int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total),
  127. vget_low_s64(v_sum_diff_total));
  128. int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
  129. int sum_diff_thresh = SUM_DIFF_THRESHOLD;
  130. if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH;
  131. if (sum_diff > sum_diff_thresh) {
  132. // Before returning to copy the block (i.e., apply no denoising),
  133. // checK if we can still apply some (weaker) temporal filtering to
  134. // this block, that would otherwise not be denoised at all. Simplest
  135. // is to apply an additional adjustment to running_avg_y to bring it
  136. // closer to sig. The adjustment is capped by a maximum delta, and
  137. // chosen such that in most cases the resulting sum_diff will be
  138. // within the accceptable range given by sum_diff_thresh.
  139. // The delta is set by the excess of absolute pixel diff over the
  140. // threshold.
  141. int delta = ((sum_diff - sum_diff_thresh) >> 8) + 1;
  142. // Only apply the adjustment for max delta up to 3.
  143. if (delta < 4) {
  144. const uint8x16_t k_delta = vmovq_n_u8(delta);
  145. sig -= sig_stride * 16;
  146. mc_running_avg_y -= mc_running_avg_y_stride * 16;
  147. running_avg_y -= running_avg_y_stride * 16;
  148. for (r = 0; r < 16; ++r) {
  149. uint8x16_t v_running_avg_y = vld1q_u8(running_avg_y);
  150. const uint8x16_t v_sig = vld1q_u8(sig);
  151. const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);
  152. /* Calculate absolute difference and sign masks. */
  153. const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
  154. const uint8x16_t v_diff_pos_mask =
  155. vcltq_u8(v_sig, v_mc_running_avg_y);
  156. const uint8x16_t v_diff_neg_mask =
  157. vcgtq_u8(v_sig, v_mc_running_avg_y);
  158. // Clamp absolute difference to delta to get the adjustment.
  159. const uint8x16_t v_abs_adjustment = vminq_u8(v_abs_diff, (k_delta));
  160. const uint8x16_t v_pos_adjustment =
  161. vandq_u8(v_diff_pos_mask, v_abs_adjustment);
  162. const uint8x16_t v_neg_adjustment =
  163. vandq_u8(v_diff_neg_mask, v_abs_adjustment);
  164. v_running_avg_y = vqsubq_u8(v_running_avg_y, v_pos_adjustment);
  165. v_running_avg_y = vqaddq_u8(v_running_avg_y, v_neg_adjustment);
  166. /* Store results. */
  167. vst1q_u8(running_avg_y, v_running_avg_y);
  168. {
  169. const int8x16_t v_sum_diff =
  170. vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment),
  171. vreinterpretq_s8_u8(v_pos_adjustment));
  172. const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
  173. const int32x4_t fedc_ba98_7654_3210 =
  174. vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
  175. const int64x2_t fedcba98_76543210 =
  176. vpaddlq_s32(fedc_ba98_7654_3210);
  177. v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
  178. }
  179. /* Update pointers for next iteration. */
  180. sig += sig_stride;
  181. mc_running_avg_y += mc_running_avg_y_stride;
  182. running_avg_y += running_avg_y_stride;
  183. }
  184. {
  185. // Update the sum of all pixel differences of this MB.
  186. x = vqadd_s64(vget_high_s64(v_sum_diff_total),
  187. vget_low_s64(v_sum_diff_total));
  188. sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
  189. if (sum_diff > sum_diff_thresh) {
  190. return COPY_BLOCK;
  191. }
  192. }
  193. } else {
  194. return COPY_BLOCK;
  195. }
  196. }
  197. }
  198. /* Tell above level that block was filtered. */
  199. running_avg_y -= running_avg_y_stride * 16;
  200. sig -= sig_stride * 16;
  201. vp8_copy_mem16x16(running_avg_y, running_avg_y_stride, sig, sig_stride);
  202. return FILTER_BLOCK;
  203. }
  204. int vp8_denoiser_filter_uv_neon(unsigned char *mc_running_avg,
  205. int mc_running_avg_stride,
  206. unsigned char *running_avg,
  207. int running_avg_stride, unsigned char *sig,
  208. int sig_stride, unsigned int motion_magnitude,
  209. int increase_denoising) {
  210. /* If motion_magnitude is small, making the denoiser more aggressive by
  211. * increasing the adjustment for each level, level1 adjustment is
  212. * increased, the deltas stay the same.
  213. */
  214. int shift_inc =
  215. (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV)
  216. ? 1
  217. : 0;
  218. const uint8x16_t v_level1_adjustment = vmovq_n_u8(
  219. (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV) ? 4 + shift_inc : 3);
  220. const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
  221. const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
  222. const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc);
  223. const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
  224. const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
  225. int64x2_t v_sum_diff_total = vdupq_n_s64(0);
  226. int r;
  227. {
  228. uint16x4_t v_sum_block = vdup_n_u16(0);
  229. // Avoid denoising color signal if its close to average level.
  230. for (r = 0; r < 8; ++r) {
  231. const uint8x8_t v_sig = vld1_u8(sig);
  232. const uint16x4_t _76_54_32_10 = vpaddl_u8(v_sig);
  233. v_sum_block = vqadd_u16(v_sum_block, _76_54_32_10);
  234. sig += sig_stride;
  235. }
  236. sig -= sig_stride * 8;
  237. {
  238. const uint32x2_t _7654_3210 = vpaddl_u16(v_sum_block);
  239. const uint64x1_t _76543210 = vpaddl_u32(_7654_3210);
  240. const int sum_block = vget_lane_s32(vreinterpret_s32_u64(_76543210), 0);
  241. if (abs(sum_block - (128 * 8 * 8)) < SUM_DIFF_FROM_AVG_THRESH_UV) {
  242. return COPY_BLOCK;
  243. }
  244. }
  245. }
  246. /* Go over lines. */
  247. for (r = 0; r < 4; ++r) {
  248. /* Load inputs. */
  249. const uint8x8_t v_sig_lo = vld1_u8(sig);
  250. const uint8x8_t v_sig_hi = vld1_u8(&sig[sig_stride]);
  251. const uint8x16_t v_sig = vcombine_u8(v_sig_lo, v_sig_hi);
  252. const uint8x8_t v_mc_running_avg_lo = vld1_u8(mc_running_avg);
  253. const uint8x8_t v_mc_running_avg_hi =
  254. vld1_u8(&mc_running_avg[mc_running_avg_stride]);
  255. const uint8x16_t v_mc_running_avg =
  256. vcombine_u8(v_mc_running_avg_lo, v_mc_running_avg_hi);
  257. /* Calculate absolute difference and sign masks. */
  258. const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg);
  259. const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg);
  260. const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg);
  261. /* Figure out which level that put us in. */
  262. const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff);
  263. const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff);
  264. const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff);
  265. /* Calculate absolute adjustments for level 1, 2 and 3. */
  266. const uint8x16_t v_level2_adjustment =
  267. vandq_u8(v_level2_mask, v_delta_level_1_and_2);
  268. const uint8x16_t v_level3_adjustment =
  269. vandq_u8(v_level3_mask, v_delta_level_2_and_3);
  270. const uint8x16_t v_level1and2_adjustment =
  271. vaddq_u8(v_level1_adjustment, v_level2_adjustment);
  272. const uint8x16_t v_level1and2and3_adjustment =
  273. vaddq_u8(v_level1and2_adjustment, v_level3_adjustment);
  274. /* Figure adjustment absolute value by selecting between the absolute
  275. * difference if in level0 or the value for level 1, 2 and 3.
  276. */
  277. const uint8x16_t v_abs_adjustment =
  278. vbslq_u8(v_level1_mask, v_level1and2and3_adjustment, v_abs_diff);
  279. /* Calculate positive and negative adjustments. Apply them to the signal
  280. * and accumulate them. Adjustments are less than eight and the maximum
  281. * sum of them (7 * 16) can fit in a signed char.
  282. */
  283. const uint8x16_t v_pos_adjustment =
  284. vandq_u8(v_diff_pos_mask, v_abs_adjustment);
  285. const uint8x16_t v_neg_adjustment =
  286. vandq_u8(v_diff_neg_mask, v_abs_adjustment);
  287. uint8x16_t v_running_avg = vqaddq_u8(v_sig, v_pos_adjustment);
  288. v_running_avg = vqsubq_u8(v_running_avg, v_neg_adjustment);
  289. /* Store results. */
  290. vst1_u8(running_avg, vget_low_u8(v_running_avg));
  291. vst1_u8(&running_avg[running_avg_stride], vget_high_u8(v_running_avg));
  292. /* Sum all the accumulators to have the sum of all pixel differences
  293. * for this macroblock.
  294. */
  295. {
  296. const int8x16_t v_sum_diff =
  297. vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment),
  298. vreinterpretq_s8_u8(v_neg_adjustment));
  299. const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
  300. const int32x4_t fedc_ba98_7654_3210 =
  301. vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
  302. const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210);
  303. v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
  304. }
  305. /* Update pointers for next iteration. */
  306. sig += sig_stride * 2;
  307. mc_running_avg += mc_running_avg_stride * 2;
  308. running_avg += running_avg_stride * 2;
  309. }
  310. /* Too much adjustments => copy block. */
  311. {
  312. int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total),
  313. vget_low_s64(v_sum_diff_total));
  314. int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
  315. int sum_diff_thresh = SUM_DIFF_THRESHOLD_UV;
  316. if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH_UV;
  317. if (sum_diff > sum_diff_thresh) {
  318. // Before returning to copy the block (i.e., apply no denoising),
  319. // checK if we can still apply some (weaker) temporal filtering to
  320. // this block, that would otherwise not be denoised at all. Simplest
  321. // is to apply an additional adjustment to running_avg_y to bring it
  322. // closer to sig. The adjustment is capped by a maximum delta, and
  323. // chosen such that in most cases the resulting sum_diff will be
  324. // within the accceptable range given by sum_diff_thresh.
  325. // The delta is set by the excess of absolute pixel diff over the
  326. // threshold.
  327. int delta = ((sum_diff - sum_diff_thresh) >> 8) + 1;
  328. // Only apply the adjustment for max delta up to 3.
  329. if (delta < 4) {
  330. const uint8x16_t k_delta = vmovq_n_u8(delta);
  331. sig -= sig_stride * 8;
  332. mc_running_avg -= mc_running_avg_stride * 8;
  333. running_avg -= running_avg_stride * 8;
  334. for (r = 0; r < 4; ++r) {
  335. const uint8x8_t v_sig_lo = vld1_u8(sig);
  336. const uint8x8_t v_sig_hi = vld1_u8(&sig[sig_stride]);
  337. const uint8x16_t v_sig = vcombine_u8(v_sig_lo, v_sig_hi);
  338. const uint8x8_t v_mc_running_avg_lo = vld1_u8(mc_running_avg);
  339. const uint8x8_t v_mc_running_avg_hi =
  340. vld1_u8(&mc_running_avg[mc_running_avg_stride]);
  341. const uint8x16_t v_mc_running_avg =
  342. vcombine_u8(v_mc_running_avg_lo, v_mc_running_avg_hi);
  343. /* Calculate absolute difference and sign masks. */
  344. const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg);
  345. const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg);
  346. const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg);
  347. // Clamp absolute difference to delta to get the adjustment.
  348. const uint8x16_t v_abs_adjustment = vminq_u8(v_abs_diff, (k_delta));
  349. const uint8x16_t v_pos_adjustment =
  350. vandq_u8(v_diff_pos_mask, v_abs_adjustment);
  351. const uint8x16_t v_neg_adjustment =
  352. vandq_u8(v_diff_neg_mask, v_abs_adjustment);
  353. const uint8x8_t v_running_avg_lo = vld1_u8(running_avg);
  354. const uint8x8_t v_running_avg_hi =
  355. vld1_u8(&running_avg[running_avg_stride]);
  356. uint8x16_t v_running_avg =
  357. vcombine_u8(v_running_avg_lo, v_running_avg_hi);
  358. v_running_avg = vqsubq_u8(v_running_avg, v_pos_adjustment);
  359. v_running_avg = vqaddq_u8(v_running_avg, v_neg_adjustment);
  360. /* Store results. */
  361. vst1_u8(running_avg, vget_low_u8(v_running_avg));
  362. vst1_u8(&running_avg[running_avg_stride],
  363. vget_high_u8(v_running_avg));
  364. {
  365. const int8x16_t v_sum_diff =
  366. vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment),
  367. vreinterpretq_s8_u8(v_pos_adjustment));
  368. const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
  369. const int32x4_t fedc_ba98_7654_3210 =
  370. vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
  371. const int64x2_t fedcba98_76543210 =
  372. vpaddlq_s32(fedc_ba98_7654_3210);
  373. v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
  374. }
  375. /* Update pointers for next iteration. */
  376. sig += sig_stride * 2;
  377. mc_running_avg += mc_running_avg_stride * 2;
  378. running_avg += running_avg_stride * 2;
  379. }
  380. {
  381. // Update the sum of all pixel differences of this MB.
  382. x = vqadd_s64(vget_high_s64(v_sum_diff_total),
  383. vget_low_s64(v_sum_diff_total));
  384. sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
  385. if (sum_diff > sum_diff_thresh) {
  386. return COPY_BLOCK;
  387. }
  388. }
  389. } else {
  390. return COPY_BLOCK;
  391. }
  392. }
  393. }
  394. /* Tell above level that block was filtered. */
  395. running_avg -= running_avg_stride * 8;
  396. sig -= sig_stride * 8;
  397. vp8_copy_mem8x8(running_avg, running_avg_stride, sig, sig_stride);
  398. return FILTER_BLOCK;
  399. }