/*
 *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <stdlib.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/mips/macros_msa.h"

extern const int16_t vpx_rv[];
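
/* Transposes an 8x16 block of unsigned bytes (rows in0..in7) into sixteen
 * 8-byte columns, returned in the low doubleword of out0..out15. */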
#define VPX_TRANSPOSE8x16_UB_UB( \
    in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3, out4, \
    out5, out6, out7, out8, out9, out10, out11, out12, out13, out14, out15) \
  { \
    v8i16 temp0, temp1, temp2, temp3, temp4; \
    v8i16 temp5, temp6, temp7, temp8, temp9; \
    \
    ILVR_B4_SH(in1, in0, in3, in2, in5, in4, in7, in6, temp0, temp1, temp2, \
               temp3); \
    ILVR_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5); \
    ILVRL_W2_SH(temp5, temp4, temp6, temp7); \
    ILVL_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5); \
    ILVRL_W2_SH(temp5, temp4, temp8, temp9); \
    ILVL_B4_SH(in1, in0, in3, in2, in5, in4, in7, in6, temp0, temp1, temp2, \
               temp3); \
    ILVR_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5); \
    ILVRL_W2_UB(temp5, temp4, out8, out10); \
    ILVL_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5); \
    ILVRL_W2_UB(temp5, temp4, out12, out14); \
    out0 = (v16u8)temp6; \
    out2 = (v16u8)temp7; \
    out4 = (v16u8)temp8; \
    out6 = (v16u8)temp9; \
    out9 = (v16u8)__msa_ilvl_d((v2i64)out8, (v2i64)out8); \
    out11 = (v16u8)__msa_ilvl_d((v2i64)out10, (v2i64)out10); \
    out13 = (v16u8)__msa_ilvl_d((v2i64)out12, (v2i64)out12); \
    out15 = (v16u8)__msa_ilvl_d((v2i64)out14, (v2i64)out14); \
    out1 = (v16u8)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
    out3 = (v16u8)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \
    out5 = (v16u8)__msa_ilvl_d((v2i64)out4, (v2i64)out4); \
    out7 = (v16u8)__msa_ilvl_d((v2i64)out6, (v2i64)out6); \
  }
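
/* Five-tap conditional average: 'out' becomes the rounded average of the
 * column (above2_in, above1_in, src_in, below1_in, below2_in), but only
 * where every absolute difference against src_in is below the per-pixel
 * limit in 'ref'; elsewhere the original src_in byte is kept. */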
#define VPX_AVER_IF_RETAIN(above2_in, above1_in, src_in, below1_in, below2_in, \
                           ref, out) \
  { \
    v16u8 temp0, temp1; \
    \
    temp1 = __msa_aver_u_b(above2_in, above1_in); \
    temp0 = __msa_aver_u_b(below2_in, below1_in); \
    temp1 = __msa_aver_u_b(temp1, temp0); \
    out = __msa_aver_u_b(src_in, temp1); \
    temp0 = __msa_asub_u_b(src_in, above2_in); \
    temp1 = __msa_asub_u_b(src_in, above1_in); \
    temp0 = (temp0 < ref); \
    temp1 = (temp1 < ref); \
    temp0 = temp0 & temp1; \
    temp1 = __msa_asub_u_b(src_in, below1_in); \
    temp1 = (temp1 < ref); \
    temp0 = temp0 & temp1; \
    temp1 = __msa_asub_u_b(src_in, below2_in); \
    temp1 = (temp1 < ref); \
    temp0 = temp0 & temp1; \
    out = __msa_bmz_v(out, src_in, temp0); \
  }
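
/* In-place byte transpose of the 16x16 block held in in0..in15; only the
 * first twelve rows of the transposed result (in0..in11) are produced,
 * which is all the horizontal pass below reads. */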
#define TRANSPOSE12x16_B(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, \
                         in10, in11, in12, in13, in14, in15) \
  { \
    v8i16 temp0, temp1, temp2, temp3, temp4; \
    v8i16 temp5, temp6, temp7, temp8, temp9; \
    \
    ILVR_B2_SH(in1, in0, in3, in2, temp0, temp1); \
    ILVRL_H2_SH(temp1, temp0, temp2, temp3); \
    ILVR_B2_SH(in5, in4, in7, in6, temp0, temp1); \
    ILVRL_H2_SH(temp1, temp0, temp4, temp5); \
    ILVRL_W2_SH(temp4, temp2, temp0, temp1); \
    ILVRL_W2_SH(temp5, temp3, temp2, temp3); \
    ILVR_B2_SH(in9, in8, in11, in10, temp4, temp5); \
    ILVR_B2_SH(in9, in8, in11, in10, temp4, temp5); \
    ILVRL_H2_SH(temp5, temp4, temp6, temp7); \
    ILVR_B2_SH(in13, in12, in15, in14, temp4, temp5); \
    ILVRL_H2_SH(temp5, temp4, temp8, temp9); \
    ILVRL_W2_SH(temp8, temp6, temp4, temp5); \
    ILVRL_W2_SH(temp9, temp7, temp6, temp7); \
    ILVL_B2_SH(in1, in0, in3, in2, temp8, temp9); \
    ILVR_D2_UB(temp4, temp0, temp5, temp1, in0, in2); \
    in1 = (v16u8)__msa_ilvl_d((v2i64)temp4, (v2i64)temp0); \
    in3 = (v16u8)__msa_ilvl_d((v2i64)temp5, (v2i64)temp1); \
    ILVL_B2_SH(in5, in4, in7, in6, temp0, temp1); \
    ILVR_D2_UB(temp6, temp2, temp7, temp3, in4, in6); \
    in5 = (v16u8)__msa_ilvl_d((v2i64)temp6, (v2i64)temp2); \
    in7 = (v16u8)__msa_ilvl_d((v2i64)temp7, (v2i64)temp3); \
    ILVL_B4_SH(in9, in8, in11, in10, in13, in12, in15, in14, temp2, temp3, \
               temp4, temp5); \
    ILVR_H4_SH(temp9, temp8, temp1, temp0, temp3, temp2, temp5, temp4, temp6, \
               temp7, temp8, temp9); \
    ILVR_W2_SH(temp7, temp6, temp9, temp8, temp0, temp1); \
    in8 = (v16u8)__msa_ilvr_d((v2i64)temp1, (v2i64)temp0); \
    in9 = (v16u8)__msa_ilvl_d((v2i64)temp1, (v2i64)temp0); \
    ILVL_W2_SH(temp7, temp6, temp9, temp8, temp2, temp3); \
    in10 = (v16u8)__msa_ilvr_d((v2i64)temp3, (v2i64)temp2); \
    in11 = (v16u8)__msa_ilvl_d((v2i64)temp3, (v2i64)temp2); \
  }
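
/* Transposes the first twelve bytes of eight input rows (in0..in7) into
 * twelve 8-byte columns, returned in the low doubleword of in0..in11. */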
#define VPX_TRANSPOSE12x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, \
                                in9, in10, in11) \
  { \
    v8i16 temp0, temp1, temp2, temp3; \
    v8i16 temp4, temp5, temp6, temp7; \
    \
    ILVR_B2_SH(in1, in0, in3, in2, temp0, temp1); \
    ILVRL_H2_SH(temp1, temp0, temp2, temp3); \
    ILVR_B2_SH(in5, in4, in7, in6, temp0, temp1); \
    ILVRL_H2_SH(temp1, temp0, temp4, temp5); \
    ILVRL_W2_SH(temp4, temp2, temp0, temp1); \
    ILVRL_W2_SH(temp5, temp3, temp2, temp3); \
    ILVL_B2_SH(in1, in0, in3, in2, temp4, temp5); \
    temp4 = __msa_ilvr_h(temp5, temp4); \
    ILVL_B2_SH(in5, in4, in7, in6, temp6, temp7); \
    temp5 = __msa_ilvr_h(temp7, temp6); \
    ILVRL_W2_SH(temp5, temp4, temp6, temp7); \
    in0 = (v16u8)temp0; \
    in2 = (v16u8)temp1; \
    in4 = (v16u8)temp2; \
    in6 = (v16u8)temp3; \
    in8 = (v16u8)temp6; \
    in10 = (v16u8)temp7; \
    in1 = (v16u8)__msa_ilvl_d((v2i64)temp0, (v2i64)temp0); \
    in3 = (v16u8)__msa_ilvl_d((v2i64)temp1, (v2i64)temp1); \
    in5 = (v16u8)__msa_ilvl_d((v2i64)temp2, (v2i64)temp2); \
    in7 = (v16u8)__msa_ilvl_d((v2i64)temp3, (v2i64)temp3); \
    in9 = (v16u8)__msa_ilvl_d((v2i64)temp6, (v2i64)temp6); \
    in11 = (v16u8)__msa_ilvl_d((v2i64)temp7, (v2i64)temp7); \
  }
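
/* Vertical ("down") then horizontal ("across") application of the five-tap
 * VPX_AVER_IF_RETAIN filter over an 8-row (chroma) strip; 'f' supplies one
 * filter limit per column. The horizontal pass reads two already-filtered
 * columns to the left of the current position. */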
static void postproc_down_across_chroma_msa(uint8_t *src_ptr, uint8_t *dst_ptr,
                                            int32_t src_stride,
                                            int32_t dst_stride, int32_t cols,
                                            uint8_t *f) {
  uint8_t *p_src = src_ptr;
  uint8_t *p_dst = dst_ptr;
  uint8_t *f_orig = f;
  uint8_t *p_dst_st = dst_ptr;
  uint16_t col;
  uint64_t out0, out1, out2, out3;
  v16u8 above2, above1, below2, below1, src, ref, ref_temp;
  v16u8 inter0, inter1, inter2, inter3, inter4, inter5;
  v16u8 inter6, inter7, inter8, inter9, inter10, inter11;

  for (col = (cols / 16); col--;) {
    ref = LD_UB(f);
    LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1);
    src = LD_UB(p_src);
    LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
    above2 = LD_UB(p_src + 3 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
    above1 = LD_UB(p_src + 4 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
    src = LD_UB(p_src + 5 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
    below1 = LD_UB(p_src + 6 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
    below2 = LD_UB(p_src + 7 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
    above2 = LD_UB(p_src + 8 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
    above1 = LD_UB(p_src + 9 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
    ST_UB8(inter0, inter1, inter2, inter3, inter4, inter5, inter6, inter7,
           p_dst, dst_stride);
    p_dst += 16;
    p_src += 16;
    f += 16;
  }

  if (0 != (cols / 16)) {
    ref = LD_UB(f);
    LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1);
    src = LD_UB(p_src);
    LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
    above2 = LD_UB(p_src + 3 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
    above1 = LD_UB(p_src + 4 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
    src = LD_UB(p_src + 5 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
    below1 = LD_UB(p_src + 6 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
    below2 = LD_UB(p_src + 7 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
    above2 = LD_UB(p_src + 8 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
    above1 = LD_UB(p_src + 9 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
    out0 = __msa_copy_u_d((v2i64)inter0, 0);
    out1 = __msa_copy_u_d((v2i64)inter1, 0);
    out2 = __msa_copy_u_d((v2i64)inter2, 0);
    out3 = __msa_copy_u_d((v2i64)inter3, 0);
    SD4(out0, out1, out2, out3, p_dst, dst_stride);

    out0 = __msa_copy_u_d((v2i64)inter4, 0);
    out1 = __msa_copy_u_d((v2i64)inter5, 0);
    out2 = __msa_copy_u_d((v2i64)inter6, 0);
    out3 = __msa_copy_u_d((v2i64)inter7, 0);
    SD4(out0, out1, out2, out3, p_dst + 4 * dst_stride, dst_stride);
  }

  f = f_orig;
  p_dst = dst_ptr - 2;
  LD_UB8(p_dst, dst_stride, inter0, inter1, inter2, inter3, inter4, inter5,
         inter6, inter7);

  for (col = 0; col < (cols / 8); ++col) {
    ref = LD_UB(f);
    f += 8;
    VPX_TRANSPOSE12x8_UB_UB(inter0, inter1, inter2, inter3, inter4, inter5,
                            inter6, inter7, inter8, inter9, inter10, inter11);
    if (0 == col) {
      above2 = inter2;
      above1 = inter2;
    } else {
      above2 = inter0;
      above1 = inter1;
    }
    src = inter2;
    below1 = inter3;
    below2 = inter4;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter2);
    above2 = inter5;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter3);
    above1 = inter6;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 2);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter4);
    src = inter7;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 3);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref_temp, inter5);
    below1 = inter8;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 4);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref_temp, inter6);
    below2 = inter9;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 5);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter7);
    if (col == (cols / 8 - 1)) {
      above2 = inter9;
    } else {
      above2 = inter10;
    }
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 6);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter8);
    if (col == (cols / 8 - 1)) {
      above1 = inter9;
    } else {
      above1 = inter11;
    }
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 7);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter9);
    TRANSPOSE8x8_UB_UB(inter2, inter3, inter4, inter5, inter6, inter7, inter8,
                       inter9, inter2, inter3, inter4, inter5, inter6, inter7,
                       inter8, inter9);
    p_dst += 8;
    LD_UB2(p_dst, dst_stride, inter0, inter1);
    ST8x1_UB(inter2, p_dst_st);
    ST8x1_UB(inter3, (p_dst_st + 1 * dst_stride));
    LD_UB2(p_dst + 2 * dst_stride, dst_stride, inter2, inter3);
    ST8x1_UB(inter4, (p_dst_st + 2 * dst_stride));
    ST8x1_UB(inter5, (p_dst_st + 3 * dst_stride));
    LD_UB2(p_dst + 4 * dst_stride, dst_stride, inter4, inter5);
    ST8x1_UB(inter6, (p_dst_st + 4 * dst_stride));
    ST8x1_UB(inter7, (p_dst_st + 5 * dst_stride));
    LD_UB2(p_dst + 6 * dst_stride, dst_stride, inter6, inter7);
    ST8x1_UB(inter8, (p_dst_st + 6 * dst_stride));
    ST8x1_UB(inter9, (p_dst_st + 7 * dst_stride));
    p_dst_st += 8;
  }
}
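
/* Same down-then-across filtering as the chroma version, but over a 16-row
 * (luma) strip: the vertical pass covers rows 0..15 and the horizontal pass
 * transposes 16 rows at a time. */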
static void postproc_down_across_luma_msa(uint8_t *src_ptr, uint8_t *dst_ptr,
                                          int32_t src_stride,
                                          int32_t dst_stride, int32_t cols,
                                          uint8_t *f) {
  uint8_t *p_src = src_ptr;
  uint8_t *p_dst = dst_ptr;
  uint8_t *p_dst_st = dst_ptr;
  uint8_t *f_orig = f;
  uint16_t col;
  uint64_t out0, out1, out2, out3;
  v16u8 above2, above1, below2, below1;
  v16u8 src, ref, ref_temp;
  v16u8 inter0, inter1, inter2, inter3, inter4, inter5, inter6;
  v16u8 inter7, inter8, inter9, inter10, inter11;
  v16u8 inter12, inter13, inter14, inter15;

  for (col = (cols / 16); col--;) {
    ref = LD_UB(f);
    LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1);
    src = LD_UB(p_src);
    LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
    above2 = LD_UB(p_src + 3 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
    above1 = LD_UB(p_src + 4 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
    src = LD_UB(p_src + 5 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
    below1 = LD_UB(p_src + 6 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
    below2 = LD_UB(p_src + 7 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
    above2 = LD_UB(p_src + 8 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
    above1 = LD_UB(p_src + 9 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
    src = LD_UB(p_src + 10 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter8);
    below1 = LD_UB(p_src + 11 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter9);
    below2 = LD_UB(p_src + 12 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter10);
    above2 = LD_UB(p_src + 13 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter11);
    above1 = LD_UB(p_src + 14 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter12);
    src = LD_UB(p_src + 15 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter13);
    below1 = LD_UB(p_src + 16 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter14);
    below2 = LD_UB(p_src + 17 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter15);
    ST_UB8(inter0, inter1, inter2, inter3, inter4, inter5, inter6, inter7,
           p_dst, dst_stride);
    ST_UB8(inter8, inter9, inter10, inter11, inter12, inter13, inter14,
           inter15, p_dst + 8 * dst_stride, dst_stride);
    p_src += 16;
    p_dst += 16;
    f += 16;
  }

  if (0 != (cols / 16)) {
    ref = LD_UB(f);
    LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1);
    src = LD_UB(p_src);
    LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
    above2 = LD_UB(p_src + 3 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
    above1 = LD_UB(p_src + 4 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
    src = LD_UB(p_src + 5 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
    below1 = LD_UB(p_src + 6 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
    below2 = LD_UB(p_src + 7 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
    above2 = LD_UB(p_src + 8 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
    above1 = LD_UB(p_src + 9 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
    src = LD_UB(p_src + 10 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter8);
    below1 = LD_UB(p_src + 11 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter9);
    below2 = LD_UB(p_src + 12 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter10);
    above2 = LD_UB(p_src + 13 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter11);
    above1 = LD_UB(p_src + 14 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter12);
    src = LD_UB(p_src + 15 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter13);
    below1 = LD_UB(p_src + 16 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter14);
    below2 = LD_UB(p_src + 17 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter15);
    out0 = __msa_copy_u_d((v2i64)inter0, 0);
    out1 = __msa_copy_u_d((v2i64)inter1, 0);
    out2 = __msa_copy_u_d((v2i64)inter2, 0);
    out3 = __msa_copy_u_d((v2i64)inter3, 0);
    SD4(out0, out1, out2, out3, p_dst, dst_stride);

    out0 = __msa_copy_u_d((v2i64)inter4, 0);
    out1 = __msa_copy_u_d((v2i64)inter5, 0);
    out2 = __msa_copy_u_d((v2i64)inter6, 0);
    out3 = __msa_copy_u_d((v2i64)inter7, 0);
    SD4(out0, out1, out2, out3, p_dst + 4 * dst_stride, dst_stride);

    out0 = __msa_copy_u_d((v2i64)inter8, 0);
    out1 = __msa_copy_u_d((v2i64)inter9, 0);
    out2 = __msa_copy_u_d((v2i64)inter10, 0);
    out3 = __msa_copy_u_d((v2i64)inter11, 0);
    SD4(out0, out1, out2, out3, p_dst + 8 * dst_stride, dst_stride);

    out0 = __msa_copy_u_d((v2i64)inter12, 0);
    out1 = __msa_copy_u_d((v2i64)inter13, 0);
    out2 = __msa_copy_u_d((v2i64)inter14, 0);
    out3 = __msa_copy_u_d((v2i64)inter15, 0);
    SD4(out0, out1, out2, out3, p_dst + 12 * dst_stride, dst_stride);
  }

  f = f_orig;
  p_dst = dst_ptr - 2;
  LD_UB8(p_dst, dst_stride, inter0, inter1, inter2, inter3, inter4, inter5,
         inter6, inter7);
  LD_UB8(p_dst + 8 * dst_stride, dst_stride, inter8, inter9, inter10, inter11,
         inter12, inter13, inter14, inter15);

  for (col = 0; col < cols / 8; ++col) {
    ref = LD_UB(f);
    f += 8;
    TRANSPOSE12x16_B(inter0, inter1, inter2, inter3, inter4, inter5, inter6,
                     inter7, inter8, inter9, inter10, inter11, inter12,
                     inter13, inter14, inter15);
    if (0 == col) {
      above2 = inter2;
      above1 = inter2;
    } else {
      above2 = inter0;
      above1 = inter1;
    }
    src = inter2;
    below1 = inter3;
    below2 = inter4;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter2);
    above2 = inter5;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter3);
    above1 = inter6;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 2);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter4);
    src = inter7;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 3);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref_temp, inter5);
    below1 = inter8;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 4);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref_temp, inter6);
    below2 = inter9;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 5);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter7);
    if (col == (cols / 8 - 1)) {
      above2 = inter9;
    } else {
      above2 = inter10;
    }
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 6);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter8);
    if (col == (cols / 8 - 1)) {
      above1 = inter9;
    } else {
      above1 = inter11;
    }
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 7);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter9);
    VPX_TRANSPOSE8x16_UB_UB(inter2, inter3, inter4, inter5, inter6, inter7,
                            inter8, inter9, inter2, inter3, inter4, inter5,
                            inter6, inter7, inter8, inter9, inter10, inter11,
                            inter12, inter13, inter14, inter15, above2, above1);
    p_dst += 8;
    LD_UB2(p_dst, dst_stride, inter0, inter1);
    ST8x1_UB(inter2, p_dst_st);
    ST8x1_UB(inter3, (p_dst_st + 1 * dst_stride));
    LD_UB2(p_dst + 2 * dst_stride, dst_stride, inter2, inter3);
    ST8x1_UB(inter4, (p_dst_st + 2 * dst_stride));
    ST8x1_UB(inter5, (p_dst_st + 3 * dst_stride));
    LD_UB2(p_dst + 4 * dst_stride, dst_stride, inter4, inter5);
    ST8x1_UB(inter6, (p_dst_st + 4 * dst_stride));
    ST8x1_UB(inter7, (p_dst_st + 5 * dst_stride));
    LD_UB2(p_dst + 6 * dst_stride, dst_stride, inter6, inter7);
    ST8x1_UB(inter8, (p_dst_st + 6 * dst_stride));
    ST8x1_UB(inter9, (p_dst_st + 7 * dst_stride));
    LD_UB2(p_dst + 8 * dst_stride, dst_stride, inter8, inter9);
    ST8x1_UB(inter10, (p_dst_st + 8 * dst_stride));
    ST8x1_UB(inter11, (p_dst_st + 9 * dst_stride));
    LD_UB2(p_dst + 10 * dst_stride, dst_stride, inter10, inter11);
    ST8x1_UB(inter12, (p_dst_st + 10 * dst_stride));
    ST8x1_UB(inter13, (p_dst_st + 11 * dst_stride));
    LD_UB2(p_dst + 12 * dst_stride, dst_stride, inter12, inter13);
    ST8x1_UB(inter14, (p_dst_st + 12 * dst_stride));
    ST8x1_UB(inter15, (p_dst_st + 13 * dst_stride));
    LD_UB2(p_dst + 14 * dst_stride, dst_stride, inter14, inter15);
    ST8x1_UB(above2, (p_dst_st + 14 * dst_stride));
    ST8x1_UB(above1, (p_dst_st + 15 * dst_stride));
    p_dst_st += 8;
  }
}
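
/* Dispatch on block size: 8 selects the chroma path, 16 the luma path. */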
void vpx_post_proc_down_and_across_mb_row_msa(uint8_t *src, uint8_t *dst,
                                              int32_t src_stride,
                                              int32_t dst_stride, int32_t cols,
                                              uint8_t *f, int32_t size) {
  if (8 == size) {
    postproc_down_across_chroma_msa(src, dst, src_stride, dst_stride, cols, f);
  } else if (16 == size) {
    postproc_down_across_luma_msa(src, dst, src_stride, dst_stride, cols, f);
  }
}
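
/* Horizontal noise filter: a sliding sum and sum of squares move across each
 * row; where sum_sq * 15 - sum * sum stays below 'flimit' the pixel is
 * replaced by the rounded windowed mean (8 + sum + pixel) >> 4. Row borders
 * are replicated first so the vector loads past either end stay valid. */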
void vpx_mbpost_proc_across_ip_msa(uint8_t *src, int32_t pitch, int32_t rows,
                                   int32_t cols, int32_t flimit) {
  int32_t row, col, cnt;
  uint8_t *src_dup = src;
  v16u8 src0, src1, tmp_orig;
  v16u8 tmp = { 0 };
  v16i8 zero = { 0 };
  v8u16 sum_h, src_r_h, src_l_h;
  v4u32 src_r_w;
  v4i32 flimit_vec;

  flimit_vec = __msa_fill_w(flimit);
  for (row = rows; row--;) {
    int32_t sum_sq;
    int32_t sum = 0;
    src0 = (v16u8)__msa_fill_b(src_dup[0]);
    ST8x1_UB(src0, (src_dup - 8));

    src0 = (v16u8)__msa_fill_b(src_dup[cols - 1]);
    ST_UB(src0, src_dup + cols);
    src_dup[cols + 16] = src_dup[cols - 1];
    tmp_orig = (v16u8)__msa_ldi_b(0);
    tmp_orig[15] = tmp[15];
    src1 = LD_UB(src_dup - 8);
    src1[15] = 0;
    ILVRL_B2_UH(zero, src1, src_r_h, src_l_h);
    src_r_w = __msa_dotp_u_w(src_r_h, src_r_h);
    src_r_w += __msa_dotp_u_w(src_l_h, src_l_h);
    sum_sq = HADD_SW_S32(src_r_w) + 16;
    sum_h = __msa_hadd_u_h(src1, src1);
    sum = HADD_UH_U32(sum_h);
    {
      v16u8 src7, src8, src_r, src_l;
      v16i8 mask;
      v8u16 add_r, add_l;
      v8i16 sub_r, sub_l, sum_r, sum_l, mask0, mask1;
      v4i32 sum_sq0, sum_sq1, sum_sq2, sum_sq3;
      v4i32 sub0, sub1, sub2, sub3;
      v4i32 sum0_w, sum1_w, sum2_w, sum3_w;
      v4i32 mul0, mul1, mul2, mul3;
      v4i32 total0, total1, total2, total3;
      v8i16 const8 = __msa_fill_h(8);

      src7 = LD_UB(src_dup + 7);
      src8 = LD_UB(src_dup - 8);
      for (col = 0; col < (cols >> 4); ++col) {
        ILVRL_B2_UB(src7, src8, src_r, src_l);
        HSUB_UB2_SH(src_r, src_l, sub_r, sub_l);
        sum_r[0] = sum + sub_r[0];
        for (cnt = 0; cnt < 7; ++cnt) {
          sum_r[cnt + 1] = sum_r[cnt] + sub_r[cnt + 1];
        }
        sum_l[0] = sum_r[7] + sub_l[0];
        for (cnt = 0; cnt < 7; ++cnt) {
          sum_l[cnt + 1] = sum_l[cnt] + sub_l[cnt + 1];
        }
        sum = sum_l[7];
        src1 = LD_UB(src_dup + 16 * col);
        ILVRL_B2_UH(zero, src1, src_r_h, src_l_h);
        src7 = (v16u8)((const8 + sum_r + (v8i16)src_r_h) >> 4);
        src8 = (v16u8)((const8 + sum_l + (v8i16)src_l_h) >> 4);
        tmp = (v16u8)__msa_pckev_b((v16i8)src8, (v16i8)src7);
        HADD_UB2_UH(src_r, src_l, add_r, add_l);
        UNPCK_SH_SW(sub_r, sub0, sub1);
        UNPCK_SH_SW(sub_l, sub2, sub3);
        ILVR_H2_SW(zero, add_r, zero, add_l, sum0_w, sum2_w);
        ILVL_H2_SW(zero, add_r, zero, add_l, sum1_w, sum3_w);
        MUL4(sum0_w, sub0, sum1_w, sub1, sum2_w, sub2, sum3_w, sub3, mul0,
             mul1, mul2, mul3);
        sum_sq0[0] = sum_sq + mul0[0];
        for (cnt = 0; cnt < 3; ++cnt) {
          sum_sq0[cnt + 1] = sum_sq0[cnt] + mul0[cnt + 1];
        }
        sum_sq1[0] = sum_sq0[3] + mul1[0];
        for (cnt = 0; cnt < 3; ++cnt) {
          sum_sq1[cnt + 1] = sum_sq1[cnt] + mul1[cnt + 1];
        }
        sum_sq2[0] = sum_sq1[3] + mul2[0];
        for (cnt = 0; cnt < 3; ++cnt) {
          sum_sq2[cnt + 1] = sum_sq2[cnt] + mul2[cnt + 1];
        }
        sum_sq3[0] = sum_sq2[3] + mul3[0];
        for (cnt = 0; cnt < 3; ++cnt) {
          sum_sq3[cnt + 1] = sum_sq3[cnt] + mul3[cnt + 1];
        }
        sum_sq = sum_sq3[3];
        UNPCK_SH_SW(sum_r, sum0_w, sum1_w);
        UNPCK_SH_SW(sum_l, sum2_w, sum3_w);
        total0 = sum_sq0 * __msa_ldi_w(15);
        total0 -= sum0_w * sum0_w;
        total1 = sum_sq1 * __msa_ldi_w(15);
        total1 -= sum1_w * sum1_w;
        total2 = sum_sq2 * __msa_ldi_w(15);
        total2 -= sum2_w * sum2_w;
        total3 = sum_sq3 * __msa_ldi_w(15);
        total3 -= sum3_w * sum3_w;
        total0 = (total0 < flimit_vec);
        total1 = (total1 < flimit_vec);
        total2 = (total2 < flimit_vec);
        total3 = (total3 < flimit_vec);
        PCKEV_H2_SH(total1, total0, total3, total2, mask0, mask1);
        mask = __msa_pckev_b((v16i8)mask1, (v16i8)mask0);
        tmp = __msa_bmz_v(tmp, src1, (v16u8)mask);

        if (col == 0) {
          uint64_t src_d;

          src_d = __msa_copy_u_d((v2i64)tmp_orig, 1);
          SD(src_d, (src_dup - 8));
        }

        src7 = LD_UB(src_dup + 16 * (col + 1) + 7);
        src8 = LD_UB(src_dup + 16 * (col + 1) - 8);
        ST_UB(tmp, (src_dup + (16 * col)));
      }

      src_dup += pitch;
    }
  }
}
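
/* Vertical counterpart of the across filter: per 16-column group, a sliding
 * column sum and sum of squares gate the same mul * 15 - sum * sum < flimit
 * test, and passing pixels are replaced by (vpx_rv entry + sum + pixel) >> 4.
 * Rows above and below the block are replicated before filtering. */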
void vpx_mbpost_proc_down_msa(uint8_t *dst_ptr, int32_t pitch, int32_t rows,
                              int32_t cols, int32_t flimit) {
  int32_t row, col, cnt, i;
  v4i32 flimit_vec;
  v16u8 dst7, dst8, dst_r_b, dst_l_b;
  v16i8 mask;
  v8u16 add_r, add_l;
  v8i16 dst_r_h, dst_l_h, sub_r, sub_l, mask0, mask1;
  v4i32 sub0, sub1, sub2, sub3, total0, total1, total2, total3;

  flimit_vec = __msa_fill_w(flimit);

  for (col = 0; col < (cols >> 4); ++col) {
    uint8_t *dst_tmp = &dst_ptr[col << 4];
    v16u8 dst;
    v16i8 zero = { 0 };
    v16u8 tmp[16];
    v8i16 mult0, mult1, rv2_0, rv2_1;
    v8i16 sum0_h = { 0 };
    v8i16 sum1_h = { 0 };
    v4i32 mul0 = { 0 };
    v4i32 mul1 = { 0 };
    v4i32 mul2 = { 0 };
    v4i32 mul3 = { 0 };
    v4i32 sum0_w, sum1_w, sum2_w, sum3_w;
    v4i32 add0, add1, add2, add3;
    const int16_t *rv2[16];

    dst = LD_UB(dst_tmp);
    for (cnt = (col << 4), i = 0; i < 16; ++cnt) {
      rv2[i] = vpx_rv + (i & 7);
      ++i;
    }
    for (cnt = -8; cnt < 0; ++cnt) {
      ST_UB(dst, dst_tmp + cnt * pitch);
    }

    dst = LD_UB((dst_tmp + (rows - 1) * pitch));
    for (cnt = rows; cnt < rows + 17; ++cnt) {
      ST_UB(dst, dst_tmp + cnt * pitch);
    }
    for (cnt = -8; cnt <= 6; ++cnt) {
      dst = LD_UB(dst_tmp + (cnt * pitch));
      UNPCK_UB_SH(dst, dst_r_h, dst_l_h);
      MUL2(dst_r_h, dst_r_h, dst_l_h, dst_l_h, mult0, mult1);
      mul0 += (v4i32)__msa_ilvr_h((v8i16)zero, (v8i16)mult0);
      mul1 += (v4i32)__msa_ilvl_h((v8i16)zero, (v8i16)mult0);
      mul2 += (v4i32)__msa_ilvr_h((v8i16)zero, (v8i16)mult1);
      mul3 += (v4i32)__msa_ilvl_h((v8i16)zero, (v8i16)mult1);
      ADD2(sum0_h, dst_r_h, sum1_h, dst_l_h, sum0_h, sum1_h);
    }

    for (row = 0; row < (rows + 8); ++row) {
      for (i = 0; i < 8; ++i) {
        rv2_0[i] = *(rv2[i] + (row & 127));
        rv2_1[i] = *(rv2[i + 8] + (row & 127));
      }
      dst7 = LD_UB(dst_tmp + (7 * pitch));
      dst8 = LD_UB(dst_tmp - (8 * pitch));
      ILVRL_B2_UB(dst7, dst8, dst_r_b, dst_l_b);
      HSUB_UB2_SH(dst_r_b, dst_l_b, sub_r, sub_l);
      UNPCK_SH_SW(sub_r, sub0, sub1);
      UNPCK_SH_SW(sub_l, sub2, sub3);
      sum0_h += sub_r;
      sum1_h += sub_l;
      HADD_UB2_UH(dst_r_b, dst_l_b, add_r, add_l);
      ILVRL_H2_SW(zero, add_r, add0, add1);
      ILVRL_H2_SW(zero, add_l, add2, add3);
      mul0 += add0 * sub0;
      mul1 += add1 * sub1;
      mul2 += add2 * sub2;
      mul3 += add3 * sub3;
      dst = LD_UB(dst_tmp);
      ILVRL_B2_SH(zero, dst, dst_r_h, dst_l_h);
      dst7 = (v16u8)((rv2_0 + sum0_h + dst_r_h) >> 4);
      dst8 = (v16u8)((rv2_1 + sum1_h + dst_l_h) >> 4);
      tmp[row & 15] = (v16u8)__msa_pckev_b((v16i8)dst8, (v16i8)dst7);
      UNPCK_SH_SW(sum0_h, sum0_w, sum1_w);
      UNPCK_SH_SW(sum1_h, sum2_w, sum3_w);
      total0 = mul0 * __msa_ldi_w(15);
      total0 -= sum0_w * sum0_w;
      total1 = mul1 * __msa_ldi_w(15);
      total1 -= sum1_w * sum1_w;
      total2 = mul2 * __msa_ldi_w(15);
      total2 -= sum2_w * sum2_w;
      total3 = mul3 * __msa_ldi_w(15);
      total3 -= sum3_w * sum3_w;
      total0 = (total0 < flimit_vec);
      total1 = (total1 < flimit_vec);
      total2 = (total2 < flimit_vec);
      total3 = (total3 < flimit_vec);
      PCKEV_H2_SH(total1, total0, total3, total2, mask0, mask1);
      mask = __msa_pckev_b((v16i8)mask1, (v16i8)mask0);
      tmp[row & 15] = __msa_bmz_v(tmp[row & 15], dst, (v16u8)mask);

      if (row >= 8) {
        ST_UB(tmp[(row - 8) & 15], (dst_tmp - 8 * pitch));
      }

      dst_tmp += pitch;
    }
  }
}