/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <stdlib.h>
#include "./vp8_rtcd.h"
#include "vp8/common/mips/msa/vp8_macros_msa.h"
#include "vp8/encoder/denoising.h"

int32_t vp8_denoiser_filter_msa(uint8_t *mc_running_avg_y_ptr,
                                int32_t mc_avg_y_stride,
                                uint8_t *running_avg_y_ptr,
                                int32_t avg_y_stride, uint8_t *sig_ptr,
                                int32_t sig_stride, uint32_t motion_magnitude,
                                int32_t increase_denoising) {
  uint8_t *running_avg_y_start = running_avg_y_ptr;
  uint8_t *sig_start = sig_ptr;
  int32_t cnt = 0;
  int32_t sum_diff = 0;
  int32_t shift_inc1 = 3;
  int32_t delta = 0;
  int32_t sum_diff_thresh;
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
  v16u8 src8, src9, src10, src11, src12, src13, src14, src15;
  v16u8 mc_running_avg_y0, running_avg_y, sig0;
  v16u8 mc_running_avg_y1, running_avg_y1, sig1;
  v16u8 coeff0, coeff1;
  v8i16 diff0, diff1, abs_diff0, abs_diff1, abs_diff_neg0, abs_diff_neg1;
  v8i16 adjust0, adjust1, adjust2, adjust3;
  v8i16 shift_inc1_vec = { 0 };
  v8i16 col_sum0 = { 0 };
  v8i16 col_sum1 = { 0 };
  v8i16 col_sum2 = { 0 };
  v8i16 col_sum3 = { 0 };
  v8i16 temp0_h, temp1_h, temp2_h, temp3_h, cmp, delta_vec;
  v4i32 temp0_w;
  v2i64 temp0_d, temp1_d;
  v8i16 zero = { 0 };
  v8i16 one = __msa_ldi_h(1);
  v8i16 four = __msa_ldi_h(4);
  v8i16 val_127 = __msa_ldi_h(127);
  v8i16 adj_val = { 6, 4, 3, 0, -6, -4, -3, 0 };
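
  /* adj_val is the per-pixel adjustment table: entries 0-2 hold the steps for
   * large/medium/small positive differences, entries 4-6 the negated steps,
   * and entries 3 and 7 are forced to zero below.  For low motion the step
   * magnitudes are enlarged by one, and by one more when increase_denoising
   * is set. */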
  if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) {
    adj_val = __msa_add_a_h(adj_val, one);
    if (increase_denoising) {
      adj_val = __msa_add_a_h(adj_val, one);
      shift_inc1 = 4;
    }

    temp0_h = zero - adj_val;
    adj_val = (v8i16)__msa_ilvev_d((v2i64)temp0_h, (v2i64)adj_val);
  }

  adj_val = __msa_insert_h(adj_val, 3, cnt);
  adj_val = __msa_insert_h(adj_val, 7, cnt);
  shift_inc1_vec = __msa_fill_h(shift_inc1);
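
  /* Filter the 16x16 luma block two rows per iteration.  Each pixel's
   * difference mc_running_avg - sig is bucketed by magnitude into an index
   * (offset by 4 when the difference is non-positive) that shuffles the
   * signed step out of adj_val; where the selected step is zero the pixel
   * takes the motion-compensated average unchanged.  col_sum0/col_sum1
   * accumulate the adjustments actually applied. */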
  for (cnt = 8; cnt--;) {
    v8i16 mask0 = { 0 };
    v8i16 mask1 = { 0 };

    mc_running_avg_y0 = LD_UB(mc_running_avg_y_ptr);
    sig0 = LD_UB(sig_ptr);
    sig_ptr += sig_stride;
    mc_running_avg_y_ptr += mc_avg_y_stride;
    mc_running_avg_y1 = LD_UB(mc_running_avg_y_ptr);
    sig1 = LD_UB(sig_ptr);

    ILVRL_B2_UB(mc_running_avg_y0, sig0, coeff0, coeff1);
    HSUB_UB2_SH(coeff0, coeff1, diff0, diff1);
    abs_diff0 = __msa_add_a_h(diff0, zero);
    abs_diff1 = __msa_add_a_h(diff1, zero);
    cmp = __msa_clei_s_h(abs_diff0, 15);
    cmp = cmp & one;
    mask0 += cmp;
    cmp = __msa_clei_s_h(abs_diff0, 7);
    cmp = cmp & one;
    mask0 += cmp;
    cmp = abs_diff0 < shift_inc1_vec;
    cmp = cmp & one;
    mask0 += cmp;
    cmp = __msa_clei_s_h(abs_diff1, 15);
    cmp = cmp & one;
    mask1 += cmp;
    cmp = __msa_clei_s_h(abs_diff1, 7);
    cmp = cmp & one;
    mask1 += cmp;
    cmp = abs_diff1 < shift_inc1_vec;
    cmp = cmp & one;
    mask1 += cmp;
    temp0_h = __msa_clei_s_h(diff0, 0);
    temp0_h = temp0_h & four;
    mask0 += temp0_h;
    temp1_h = __msa_clei_s_h(diff1, 0);
    temp1_h = temp1_h & four;
    mask1 += temp1_h;
    VSHF_H2_SH(adj_val, adj_val, adj_val, adj_val, mask0, mask1, adjust0,
               adjust1);
    temp2_h = __msa_ceqi_h(adjust0, 0);
    temp3_h = __msa_ceqi_h(adjust1, 0);
    adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, (v16u8)temp2_h);
    adjust1 = (v8i16)__msa_bmnz_v((v16u8)adjust1, (v16u8)diff1, (v16u8)temp3_h);
    ADD2(col_sum0, adjust0, col_sum1, adjust1, col_sum0, col_sum1);
    UNPCK_UB_SH(sig0, temp0_h, temp1_h);
    ADD2(temp0_h, adjust0, temp1_h, adjust1, temp0_h, temp1_h);
    MAXI_SH2_SH(temp0_h, temp1_h, 0);
    SAT_UH2_SH(temp0_h, temp1_h, 7);
    temp2_h = (v8i16)__msa_pckev_b((v16i8)temp3_h, (v16i8)temp2_h);
    running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp1_h, (v16i8)temp0_h);
    running_avg_y =
        __msa_bmnz_v(running_avg_y, mc_running_avg_y0, (v16u8)temp2_h);
    ST_UB(running_avg_y, running_avg_y_ptr);
    running_avg_y_ptr += avg_y_stride;

    mask0 = zero;
    mask1 = zero;
    ILVRL_B2_UB(mc_running_avg_y1, sig1, coeff0, coeff1);
    HSUB_UB2_SH(coeff0, coeff1, diff0, diff1);
    abs_diff0 = __msa_add_a_h(diff0, zero);
    abs_diff1 = __msa_add_a_h(diff1, zero);
    cmp = __msa_clei_s_h(abs_diff0, 15);
    cmp = cmp & one;
    mask0 += cmp;
    cmp = __msa_clei_s_h(abs_diff0, 7);
    cmp = cmp & one;
    mask0 += cmp;
    cmp = abs_diff0 < shift_inc1_vec;
    cmp = cmp & one;
    mask0 += cmp;
    cmp = __msa_clei_s_h(abs_diff1, 15);
    cmp = cmp & one;
    mask1 += cmp;
    cmp = __msa_clei_s_h(abs_diff1, 7);
    cmp = cmp & one;
    mask1 += cmp;
    cmp = abs_diff1 < shift_inc1_vec;
    cmp = cmp & one;
    mask1 += cmp;
    temp0_h = __msa_clei_s_h(diff0, 0);
    temp0_h = temp0_h & four;
    mask0 += temp0_h;
    temp1_h = __msa_clei_s_h(diff1, 0);
    temp1_h = temp1_h & four;
    mask1 += temp1_h;
    VSHF_H2_SH(adj_val, adj_val, adj_val, adj_val, mask0, mask1, adjust0,
               adjust1);
    temp2_h = __msa_ceqi_h(adjust0, 0);
    temp3_h = __msa_ceqi_h(adjust1, 0);
    adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, (v16u8)temp2_h);
    adjust1 = (v8i16)__msa_bmnz_v((v16u8)adjust1, (v16u8)diff1, (v16u8)temp3_h);
    ADD2(col_sum0, adjust0, col_sum1, adjust1, col_sum0, col_sum1);
    UNPCK_UB_SH(sig1, temp0_h, temp1_h);
    ADD2(temp0_h, adjust0, temp1_h, adjust1, temp0_h, temp1_h);
    MAXI_SH2_SH(temp0_h, temp1_h, 0);
    SAT_UH2_SH(temp0_h, temp1_h, 7);
    temp2_h = (v8i16)__msa_pckev_b((v16i8)temp3_h, (v16i8)temp2_h);
    running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp1_h, (v16i8)temp0_h);
    running_avg_y =
        __msa_bmnz_v(running_avg_y, mc_running_avg_y1, (v16u8)temp2_h);
    ST_UB(running_avg_y, running_avg_y_ptr);
    sig_ptr += sig_stride;
    mc_running_avg_y_ptr += mc_avg_y_stride;
    running_avg_y_ptr += avg_y_stride;
  }
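
  /* Clamp the per-column adjustment sums, reduce them horizontally to a
   * single sum_diff for the block, and rewind the pointers to the top of the
   * block. */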
  col_sum0 = __msa_min_s_h(col_sum0, val_127);
  col_sum1 = __msa_min_s_h(col_sum1, val_127);
  temp0_h = col_sum0 + col_sum1;
  temp0_w = __msa_hadd_s_w(temp0_h, temp0_h);
  temp0_d = __msa_hadd_s_d(temp0_w, temp0_w);
  temp1_d = __msa_splati_d(temp0_d, 1);
  temp0_d += temp1_d;
  sum_diff = __msa_copy_s_w((v4i32)temp0_d, 0);
  sig_ptr -= sig_stride * 16;
  mc_running_avg_y_ptr -= mc_avg_y_stride * 16;
  running_avg_y_ptr -= avg_y_stride * 16;
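
  /* If the total adjustment applied to the block is too large, run a second
   * pass that corrects each filtered pixel by at most 'delta'.  A delta of 4
   * or more, or a sum that is still over threshold afterwards, means the
   * block is copied (COPY_BLOCK) rather than filtered. */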
  sum_diff_thresh = SUM_DIFF_THRESHOLD;
  if (increase_denoising) {
    sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH;
  }
  if (abs(sum_diff) > sum_diff_thresh) {
    delta = ((abs(sum_diff) - sum_diff_thresh) >> 8) + 1;
    delta_vec = __msa_fill_h(delta);
    if (delta < 4) {
      for (cnt = 8; cnt--;) {
        running_avg_y = LD_UB(running_avg_y_ptr);
        mc_running_avg_y0 = LD_UB(mc_running_avg_y_ptr);
        sig0 = LD_UB(sig_ptr);
        sig_ptr += sig_stride;
        mc_running_avg_y_ptr += mc_avg_y_stride;
        running_avg_y_ptr += avg_y_stride;
        mc_running_avg_y1 = LD_UB(mc_running_avg_y_ptr);
        sig1 = LD_UB(sig_ptr);
        running_avg_y1 = LD_UB(running_avg_y_ptr);

        ILVRL_B2_UB(mc_running_avg_y0, sig0, coeff0, coeff1);
        HSUB_UB2_SH(coeff0, coeff1, diff0, diff1);
        abs_diff0 = __msa_add_a_h(diff0, zero);
        abs_diff1 = __msa_add_a_h(diff1, zero);
        temp0_h = abs_diff0 < delta_vec;
        temp1_h = abs_diff1 < delta_vec;
        abs_diff0 = (v8i16)__msa_bmz_v((v16u8)abs_diff0, (v16u8)delta_vec,
                                       (v16u8)temp0_h);
        abs_diff1 = (v8i16)__msa_bmz_v((v16u8)abs_diff1, (v16u8)delta_vec,
                                       (v16u8)temp1_h);
        SUB2(zero, abs_diff0, zero, abs_diff1, abs_diff_neg0, abs_diff_neg1);
        abs_diff_neg0 = zero - abs_diff0;
        abs_diff_neg1 = zero - abs_diff1;
        temp0_h = __msa_clei_s_h(diff0, 0);
        temp1_h = __msa_clei_s_h(diff1, 0);
        adjust0 = (v8i16)__msa_bmnz_v((v16u8)abs_diff0, (v16u8)abs_diff_neg0,
                                      (v16u8)temp0_h);
        adjust1 = (v8i16)__msa_bmnz_v((v16u8)abs_diff1, (v16u8)abs_diff_neg1,
                                      (v16u8)temp1_h);
        ILVRL_B2_SH(zero, running_avg_y, temp2_h, temp3_h);
        ADD2(temp2_h, adjust0, temp3_h, adjust1, adjust2, adjust3);
        MAXI_SH2_SH(adjust2, adjust3, 0);
        SAT_UH2_SH(adjust2, adjust3, 7);
        temp0_h = __msa_ceqi_h(diff0, 0);
        temp1_h = __msa_ceqi_h(diff1, 0);
        adjust2 =
            (v8i16)__msa_bmz_v((v16u8)adjust2, (v16u8)temp2_h, (v16u8)temp0_h);
        adjust3 =
            (v8i16)__msa_bmz_v((v16u8)adjust3, (v16u8)temp3_h, (v16u8)temp1_h);
        adjust0 =
            (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)zero, (v16u8)temp0_h);
        adjust1 =
            (v8i16)__msa_bmnz_v((v16u8)adjust1, (v16u8)zero, (v16u8)temp1_h);
        ADD2(col_sum2, adjust0, col_sum3, adjust1, col_sum2, col_sum3);
        running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust3, (v16i8)adjust2);
        ST_UB(running_avg_y, running_avg_y_ptr - avg_y_stride);

        ILVRL_B2_UB(mc_running_avg_y1, sig1, coeff0, coeff1);
        HSUB_UB2_SH(coeff0, coeff1, diff0, diff1);
        abs_diff0 = __msa_add_a_h(diff0, zero);
        abs_diff1 = __msa_add_a_h(diff1, zero);
        temp0_h = abs_diff0 < delta_vec;
        temp1_h = abs_diff1 < delta_vec;
        abs_diff0 = (v8i16)__msa_bmz_v((v16u8)abs_diff0, (v16u8)delta_vec,
                                       (v16u8)temp0_h);
        abs_diff1 = (v8i16)__msa_bmz_v((v16u8)abs_diff1, (v16u8)delta_vec,
                                       (v16u8)temp1_h);
        SUB2(zero, abs_diff0, zero, abs_diff1, abs_diff_neg0, abs_diff_neg1);
        temp0_h = __msa_clei_s_h(diff0, 0);
        temp1_h = __msa_clei_s_h(diff1, 0);
        adjust0 = (v8i16)__msa_bmnz_v((v16u8)abs_diff0, (v16u8)abs_diff_neg0,
                                      (v16u8)temp0_h);
        adjust1 = (v8i16)__msa_bmnz_v((v16u8)abs_diff1, (v16u8)abs_diff_neg1,
                                      (v16u8)temp1_h);
        ILVRL_H2_SH(zero, running_avg_y1, temp2_h, temp3_h);
        ADD2(temp2_h, adjust0, temp3_h, adjust1, adjust2, adjust3);
        MAXI_SH2_SH(adjust2, adjust3, 0);
        SAT_UH2_SH(adjust2, adjust3, 7);
        temp0_h = __msa_ceqi_h(diff0, 0);
        temp1_h = __msa_ceqi_h(diff1, 0);
        adjust2 =
            (v8i16)__msa_bmz_v((v16u8)adjust2, (v16u8)temp2_h, (v16u8)temp0_h);
        adjust3 =
            (v8i16)__msa_bmz_v((v16u8)adjust3, (v16u8)temp3_h, (v16u8)temp1_h);
        adjust0 =
            (v8i16)__msa_bmz_v((v16u8)adjust0, (v16u8)zero, (v16u8)temp0_h);
        adjust1 =
            (v8i16)__msa_bmz_v((v16u8)adjust1, (v16u8)zero, (v16u8)temp1_h);
        ADD2(col_sum2, adjust0, col_sum3, adjust1, col_sum2, col_sum3);
        running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust3, (v16i8)adjust2);
        ST_UB(running_avg_y, running_avg_y_ptr);
        running_avg_y_ptr += avg_y_stride;
      }

      col_sum2 = __msa_min_s_h(col_sum2, val_127);
      col_sum3 = __msa_min_s_h(col_sum3, val_127);
      temp0_h = col_sum2 + col_sum3;
      temp0_w = __msa_hadd_s_w(temp0_h, temp0_h);
      temp0_d = __msa_hadd_s_d(temp0_w, temp0_w);
      temp1_d = __msa_splati_d(temp0_d, 1);
      temp0_d += (v2i64)temp1_d;
      sum_diff = __msa_copy_s_w((v4i32)temp0_d, 0);
      if (abs(sum_diff) > SUM_DIFF_THRESHOLD) {
        return COPY_BLOCK;
      }
    } else {
      return COPY_BLOCK;
    }
  }
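
  /* Done filtering: copy the 16x16 block from sig_start into the
   * running-average buffer and report the block as filtered. */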
  LD_UB8(sig_start, sig_stride, src0, src1, src2, src3, src4, src5, src6,
         src7);
  sig_start += (8 * sig_stride);
  LD_UB8(sig_start, sig_stride, src8, src9, src10, src11, src12, src13, src14,
         src15);
  ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, running_avg_y_start,
         avg_y_stride);
  running_avg_y_start += (8 * avg_y_stride);
  ST_UB8(src8, src9, src10, src11, src12, src13, src14, src15,
         running_avg_y_start, avg_y_stride);

  return FILTER_BLOCK;
}

int32_t vp8_denoiser_filter_uv_msa(
    uint8_t *mc_running_avg_y_ptr, int32_t mc_avg_y_stride,
    uint8_t *running_avg_y_ptr, int32_t avg_y_stride, uint8_t *sig_ptr,
    int32_t sig_stride, uint32_t motion_magnitude,
    int32_t increase_denoising) {
  uint8_t *running_avg_y_start = running_avg_y_ptr;
  uint8_t *sig_start = sig_ptr;
  int32_t cnt = 0;
  int32_t sum_diff = 0;
  int32_t shift_inc1 = 3;
  int32_t delta = 0;
  int32_t sum_block = 0;
  int32_t sum_diff_thresh;
  int64_t dst0, dst1, src0, src1, src2, src3;
  v16u8 mc_running_avg_y0, running_avg_y, sig0;
  v16u8 mc_running_avg_y1, running_avg_y1, sig1;
  v16u8 sig2, sig3, sig4, sig5, sig6, sig7;
  v16u8 coeff0;
  v8i16 diff0, abs_diff0, abs_diff_neg0;
  v8i16 adjust0, adjust2;
  v8i16 shift_inc1_vec = { 0 };
  v8i16 col_sum0 = { 0 };
  v8i16 temp0_h, temp2_h, cmp, delta_vec;
  v4i32 temp0_w;
  v2i64 temp0_d, temp1_d;
  v16i8 zero = { 0 };
  v8i16 one = __msa_ldi_h(1);
  v8i16 four = __msa_ldi_h(4);
  v8i16 adj_val = { 6, 4, 3, 0, -6, -4, -3, 0 };
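
  /* Sum the 8x8 chroma block first and skip filtering when the colour signal
   * is already close to the average (128) level. */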
  sig0 = LD_UB(sig_ptr);
  sig_ptr += sig_stride;
  temp0_h = (v8i16)__msa_ilvr_b(zero, (v16i8)sig0);
  sig1 = LD_UB(sig_ptr);
  sig_ptr += sig_stride;
  temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig1);
  sig2 = LD_UB(sig_ptr);
  sig_ptr += sig_stride;
  temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig2);
  sig3 = LD_UB(sig_ptr);
  sig_ptr += sig_stride;
  temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig3);
  sig4 = LD_UB(sig_ptr);
  sig_ptr += sig_stride;
  temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig4);
  sig5 = LD_UB(sig_ptr);
  sig_ptr += sig_stride;
  temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig5);
  sig6 = LD_UB(sig_ptr);
  sig_ptr += sig_stride;
  temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig6);
  sig7 = LD_UB(sig_ptr);
  sig_ptr += sig_stride;
  temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig7);
  temp0_w = __msa_hadd_s_w(temp0_h, temp0_h);
  temp0_d = __msa_hadd_s_d(temp0_w, temp0_w);
  temp1_d = __msa_splati_d(temp0_d, 1);
  temp0_d += temp1_d;
  sum_block = __msa_copy_s_w((v4i32)temp0_d, 0);
  sig_ptr -= sig_stride * 8;

  if (abs(sum_block - (128 * 8 * 8)) < SUM_DIFF_FROM_AVG_THRESH_UV) {
    return COPY_BLOCK;
  }

  if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) {
    adj_val = __msa_add_a_h(adj_val, one);
    if (increase_denoising) {
      adj_val = __msa_add_a_h(adj_val, one);
      shift_inc1 = 4;
    }

    temp0_h = (v8i16)zero - adj_val;
    adj_val = (v8i16)__msa_ilvev_d((v2i64)temp0_h, (v2i64)adj_val);
  }

  adj_val = __msa_insert_h(adj_val, 3, cnt);
  adj_val = __msa_insert_h(adj_val, 7, cnt);
  shift_inc1_vec = __msa_fill_h(shift_inc1);
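
  /* Filter the 8x8 chroma block two 8-pixel rows per iteration, using the
   * same magnitude-bucketed adj_val lookup as the luma path; col_sum0
   * accumulates the applied adjustments. */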
  for (cnt = 4; cnt--;) {
    v8i16 mask0 = { 0 };

    mc_running_avg_y0 = LD_UB(mc_running_avg_y_ptr);
    sig0 = LD_UB(sig_ptr);
    sig_ptr += sig_stride;
    mc_running_avg_y_ptr += mc_avg_y_stride;
    mc_running_avg_y1 = LD_UB(mc_running_avg_y_ptr);
    sig1 = LD_UB(sig_ptr);

    coeff0 = (v16u8)__msa_ilvr_b((v16i8)mc_running_avg_y0, (v16i8)sig0);
    diff0 = __msa_hsub_u_h(coeff0, coeff0);
    abs_diff0 = __msa_add_a_h(diff0, (v8i16)zero);
    cmp = __msa_clei_s_h(abs_diff0, 15);
    cmp = cmp & one;
    mask0 += cmp;
    cmp = __msa_clei_s_h(abs_diff0, 7);
    cmp = cmp & one;
    mask0 += cmp;
    cmp = abs_diff0 < shift_inc1_vec;
    cmp = cmp & one;
    mask0 += cmp;
    temp0_h = __msa_clei_s_h(diff0, 0);
    temp0_h = temp0_h & four;
    mask0 += temp0_h;
    adjust0 = __msa_vshf_h(mask0, adj_val, adj_val);
    temp2_h = __msa_ceqi_h(adjust0, 0);
    adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, (v16u8)temp2_h);
    col_sum0 += adjust0;
    temp0_h = (v8i16)__msa_ilvr_b(zero, (v16i8)sig0);
    temp0_h += adjust0;
    temp0_h = __msa_maxi_s_h(temp0_h, 0);
    temp0_h = (v8i16)__msa_sat_u_h((v8u16)temp0_h, 7);
    temp2_h = (v8i16)__msa_pckev_b((v16i8)temp2_h, (v16i8)temp2_h);
    running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp0_h, (v16i8)temp0_h);
    running_avg_y =
        __msa_bmnz_v(running_avg_y, mc_running_avg_y0, (v16u8)temp2_h);
    dst0 = __msa_copy_s_d((v2i64)running_avg_y, 0);
    SD(dst0, running_avg_y_ptr);
    running_avg_y_ptr += avg_y_stride;

    mask0 = __msa_ldi_h(0);
    coeff0 = (v16u8)__msa_ilvr_b((v16i8)mc_running_avg_y1, (v16i8)sig1);
    diff0 = __msa_hsub_u_h(coeff0, coeff0);
    abs_diff0 = __msa_add_a_h(diff0, (v8i16)zero);
    cmp = __msa_clei_s_h(abs_diff0, 15);
    cmp = cmp & one;
    mask0 += cmp;
    cmp = __msa_clei_s_h(abs_diff0, 7);
    cmp = cmp & one;
    mask0 += cmp;
    cmp = abs_diff0 < shift_inc1_vec;
    cmp = cmp & one;
    mask0 += cmp;
    temp0_h = __msa_clei_s_h(diff0, 0);
    temp0_h = temp0_h & four;
    mask0 += temp0_h;
    adjust0 = __msa_vshf_h(mask0, adj_val, adj_val);
    temp2_h = __msa_ceqi_h(adjust0, 0);
    adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, (v16u8)temp2_h);
    col_sum0 += adjust0;
    temp0_h = (v8i16)__msa_ilvr_b(zero, (v16i8)sig1);
    temp0_h += adjust0;
    temp0_h = __msa_maxi_s_h(temp0_h, 0);
    temp0_h = (v8i16)__msa_sat_u_h((v8u16)temp0_h, 7);
    temp2_h = (v8i16)__msa_pckev_b((v16i8)temp2_h, (v16i8)temp2_h);
    running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp0_h, (v16i8)temp0_h);
    running_avg_y =
        __msa_bmnz_v(running_avg_y, mc_running_avg_y1, (v16u8)temp2_h);
    dst1 = __msa_copy_s_d((v2i64)running_avg_y, 0);
    SD(dst1, running_avg_y_ptr);
    sig_ptr += sig_stride;
    mc_running_avg_y_ptr += mc_avg_y_stride;
    running_avg_y_ptr += avg_y_stride;
  }

  temp0_h = col_sum0;
  temp0_w = __msa_hadd_s_w(temp0_h, temp0_h);
  temp0_d = __msa_hadd_s_d(temp0_w, temp0_w);
  temp1_d = __msa_splati_d(temp0_d, 1);
  temp0_d += temp1_d;
  sum_diff = __msa_copy_s_w((v4i32)temp0_d, 0);
  sig_ptr -= sig_stride * 8;
  mc_running_avg_y_ptr -= mc_avg_y_stride * 8;
  running_avg_y_ptr -= avg_y_stride * 8;

  sum_diff_thresh = SUM_DIFF_THRESHOLD_UV;
  if (increase_denoising) {
    sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH_UV;
  }
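
  /* As in the luma path, an over-threshold total adjustment triggers a second
   * correction pass bounded by 'delta'; a delta of 4 or more, or a sum that
   * is still over threshold afterwards, means the block is copied instead of
   * filtered. */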
  if (abs(sum_diff) > sum_diff_thresh) {
    delta = ((abs(sum_diff) - sum_diff_thresh) >> 8) + 1;
    delta_vec = __msa_fill_h(delta);
    if (delta < 4) {
      for (cnt = 4; cnt--;) {
        running_avg_y = LD_UB(running_avg_y_ptr);
        mc_running_avg_y0 = LD_UB(mc_running_avg_y_ptr);
        sig0 = LD_UB(sig_ptr);
        /* Update pointers for next iteration. */
        sig_ptr += sig_stride;
        mc_running_avg_y_ptr += mc_avg_y_stride;
        running_avg_y_ptr += avg_y_stride;
        mc_running_avg_y1 = LD_UB(mc_running_avg_y_ptr);
        sig1 = LD_UB(sig_ptr);
        running_avg_y1 = LD_UB(running_avg_y_ptr);

        coeff0 = (v16u8)__msa_ilvr_b((v16i8)mc_running_avg_y0, (v16i8)sig0);
        diff0 = __msa_hsub_u_h(coeff0, coeff0);
        abs_diff0 = __msa_add_a_h(diff0, (v8i16)zero);
        temp0_h = delta_vec < abs_diff0;
        abs_diff0 = (v8i16)__msa_bmnz_v((v16u8)abs_diff0, (v16u8)delta_vec,
                                        (v16u8)temp0_h);
        abs_diff_neg0 = (v8i16)zero - abs_diff0;
        temp0_h = __msa_clei_s_h(diff0, 0);
        adjust0 = (v8i16)__msa_bmz_v((v16u8)abs_diff0, (v16u8)abs_diff_neg0,
                                     (v16u8)temp0_h);
        temp2_h = (v8i16)__msa_ilvr_b(zero, (v16i8)running_avg_y);
        adjust2 = temp2_h + adjust0;
        adjust2 = __msa_maxi_s_h(adjust2, 0);
        adjust2 = (v8i16)__msa_sat_u_h((v8u16)adjust2, 7);
        temp0_h = __msa_ceqi_h(diff0, 0);
        adjust2 =
            (v8i16)__msa_bmnz_v((v16u8)adjust2, (v16u8)temp2_h, (v16u8)temp0_h);
        adjust0 =
            (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)zero, (v16u8)temp0_h);
        col_sum0 += adjust0;
        running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust2, (v16i8)adjust2);
        dst0 = __msa_copy_s_d((v2i64)running_avg_y, 0);
        SD(dst0, running_avg_y_ptr - avg_y_stride);

        coeff0 = (v16u8)__msa_ilvr_b((v16i8)mc_running_avg_y1, (v16i8)sig1);
        diff0 = __msa_hsub_u_h(coeff0, coeff0);
        abs_diff0 = __msa_add_a_h(diff0, (v8i16)zero);
        temp0_h = delta_vec < abs_diff0;
        abs_diff0 = (v8i16)__msa_bmnz_v((v16u8)abs_diff0, (v16u8)delta_vec,
                                        (v16u8)temp0_h);
        abs_diff_neg0 = (v8i16)zero - abs_diff0;
        temp0_h = __msa_clei_s_h(diff0, 0);
        adjust0 = (v8i16)__msa_bmz_v((v16u8)abs_diff0, (v16u8)abs_diff_neg0,
                                     (v16u8)temp0_h);
        temp2_h = (v8i16)__msa_ilvr_b(zero, (v16i8)running_avg_y1);
        adjust2 = temp2_h + adjust0;
        adjust2 = __msa_maxi_s_h(adjust2, 0);
        adjust2 = (v8i16)__msa_sat_u_h((v8u16)adjust2, 7);
        temp0_h = __msa_ceqi_h(diff0, 0);
        adjust2 =
            (v8i16)__msa_bmnz_v((v16u8)adjust2, (v16u8)temp2_h, (v16u8)temp0_h);
        adjust0 =
            (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)zero, (v16u8)temp0_h);
        col_sum0 += adjust0;
        running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust2, (v16i8)adjust2);
        dst1 = __msa_copy_s_d((v2i64)running_avg_y, 0);
        SD(dst1, running_avg_y_ptr);
        running_avg_y_ptr += avg_y_stride;
      }

      temp0_h = col_sum0;
      temp0_w = __msa_hadd_s_w(temp0_h, temp0_h);
      temp0_d = __msa_hadd_s_d(temp0_w, temp0_w);
      temp1_d = __msa_splati_d(temp0_d, 1);
      temp0_d += temp1_d;
      sum_diff = __msa_copy_s_w((v4i32)temp0_d, 0);
      if (abs(sum_diff) > sum_diff_thresh) {
        return COPY_BLOCK;
      }
    } else {
      return COPY_BLOCK;
    }
  }
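
  /* Done filtering: copy the 8x8 block from sig_start into the
   * running-average buffer and report the block as filtered. */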
  LD4(sig_start, sig_stride, src0, src1, src2, src3);
  sig_start += (4 * sig_stride);
  SD4(src0, src1, src2, src3, running_avg_y_start, avg_y_stride);
  running_avg_y_start += (4 * avg_y_stride);
  LD4(sig_start, sig_stride, src0, src1, src2, src3);
  SD4(src0, src1, src2, src3, running_avg_y_start, avg_y_stride);

  return FILTER_BLOCK;
}