/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/mips/macros_msa.h"
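
/* Accumulate the sum of squared src/ref differences for 16 byte lanes into
 * the v4i32 accumulator 'var'. ILVRL_B2_UB, HSUB_UB2_SH and DPADD_SH2_SW are
 * MSA helpers from macros_msa.h: interleave the src and ref bytes, take
 * per-pixel signed halfword differences, then square and accumulate them via
 * a dot product. */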
#define CALC_MSE_B(src, ref, var)                                   \
  {                                                                 \
    v16u8 src_l0_m, src_l1_m;                                       \
    v8i16 res_l0_m, res_l1_m;                                       \
                                                                    \
    ILVRL_B2_UB(src, ref, src_l0_m, src_l1_m);                      \
    HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m);            \
    DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \
  }
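
/* Same as CALC_MSE_B, but also accumulates the raw signed differences into
 * the v8i16 accumulator 'sub', from which the caller derives the block sum
 * needed for the variance calculation. */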
#define CALC_MSE_AVG_B(src, ref, var, sub)                          \
  {                                                                 \
    v16u8 src_l0_m, src_l1_m;                                       \
    v8i16 res_l0_m, res_l1_m;                                       \
                                                                    \
    ILVRL_B2_UB(src, ref, src_l0_m, src_l1_m);                      \
    HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m);            \
    DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \
                                                                    \
    sub += res_l0_m + res_l1_m;                                     \
  }
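
/* variance = sse - (sum * sum) / (width * height), with 'shift' equal to
 * log2(width * height). The 64-bit variant is required once a block holds
 * more than 256 pixels, since diff * diff can then overflow uint32_t. */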
#define VARIANCE_WxH(sse, diff, shift) \
  ((sse) - (((uint32_t)(diff) * (diff)) >> (shift)))

#define VARIANCE_LARGE_WxH(sse, diff, shift) \
  ((sse) - (((int64_t)(diff) * (diff)) >> (shift)))
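
/* 4-pixel-wide blocks: four 32-bit rows of src and ref are gathered into one
 * vector each before the difference pass. Returns the SSE and stores the
 * signed difference sum to *diff; height is assumed to be a multiple of 4. */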
static uint32_t sse_diff_4width_msa(const uint8_t *src_ptr, int32_t src_stride,
                                    const uint8_t *ref_ptr, int32_t ref_stride,
                                    int32_t height, int32_t *diff) {
  uint32_t src0, src1, src2, src3;
  uint32_t ref0, ref1, ref2, ref3;
  int32_t ht_cnt;
  v16u8 src = { 0 };
  v16u8 ref = { 0 };
  v8i16 avg = { 0 };
  v4i32 vec, var = { 0 };

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    LW4(src_ptr, src_stride, src0, src1, src2, src3);
    src_ptr += (4 * src_stride);
    LW4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3);
    ref_ptr += (4 * ref_stride);

    INSERT_W4_UB(src0, src1, src2, src3, src);
    INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
    CALC_MSE_AVG_B(src, ref, var, avg);
  }

  vec = __msa_hadd_s_w(avg, avg);
  *diff = HADD_SW_S32(vec);

  return HADD_SW_S32(var);
}
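
/* The 8-, 16- and 32-pixel-wide variants follow the same pattern, processing
 * four rows per loop iteration; the 8-wide version packs two 8-byte rows
 * into each vector with PCKEV_D4_UB. */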
static uint32_t sse_diff_8width_msa(const uint8_t *src_ptr, int32_t src_stride,
                                    const uint8_t *ref_ptr, int32_t ref_stride,
                                    int32_t height, int32_t *diff) {
  int32_t ht_cnt;
  v16u8 src0, src1, src2, src3;
  v16u8 ref0, ref1, ref2, ref3;
  v8i16 avg = { 0 };
  v4i32 vec, var = { 0 };

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    LD_UB4(src_ptr, src_stride, src0, src1, src2, src3);
    src_ptr += (4 * src_stride);
    LD_UB4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3);
    ref_ptr += (4 * ref_stride);

    PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, src1,
                ref0, ref1);
    CALC_MSE_AVG_B(src0, ref0, var, avg);
    CALC_MSE_AVG_B(src1, ref1, var, avg);
  }

  vec = __msa_hadd_s_w(avg, avg);
  *diff = HADD_SW_S32(vec);

  return HADD_SW_S32(var);
}

static uint32_t sse_diff_16width_msa(const uint8_t *src_ptr,
                                     int32_t src_stride,
                                     const uint8_t *ref_ptr,
                                     int32_t ref_stride, int32_t height,
                                     int32_t *diff) {
  int32_t ht_cnt;
  v16u8 src, ref;
  v8i16 avg = { 0 };
  v4i32 vec, var = { 0 };

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    src = LD_UB(src_ptr);
    src_ptr += src_stride;
    ref = LD_UB(ref_ptr);
    ref_ptr += ref_stride;
    CALC_MSE_AVG_B(src, ref, var, avg);

    src = LD_UB(src_ptr);
    src_ptr += src_stride;
    ref = LD_UB(ref_ptr);
    ref_ptr += ref_stride;
    CALC_MSE_AVG_B(src, ref, var, avg);

    src = LD_UB(src_ptr);
    src_ptr += src_stride;
    ref = LD_UB(ref_ptr);
    ref_ptr += ref_stride;
    CALC_MSE_AVG_B(src, ref, var, avg);

    src = LD_UB(src_ptr);
    src_ptr += src_stride;
    ref = LD_UB(ref_ptr);
    ref_ptr += ref_stride;
    CALC_MSE_AVG_B(src, ref, var, avg);
  }

  vec = __msa_hadd_s_w(avg, avg);
  *diff = HADD_SW_S32(vec);

  return HADD_SW_S32(var);
}

static uint32_t sse_diff_32width_msa(const uint8_t *src_ptr,
                                     int32_t src_stride,
                                     const uint8_t *ref_ptr,
                                     int32_t ref_stride, int32_t height,
                                     int32_t *diff) {
  int32_t ht_cnt;
  v16u8 src0, src1, ref0, ref1;
  v8i16 avg = { 0 };
  v4i32 vec, var = { 0 };

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    LD_UB2(src_ptr, 16, src0, src1);
    src_ptr += src_stride;
    LD_UB2(ref_ptr, 16, ref0, ref1);
    ref_ptr += ref_stride;
    CALC_MSE_AVG_B(src0, ref0, var, avg);
    CALC_MSE_AVG_B(src1, ref1, var, avg);

    LD_UB2(src_ptr, 16, src0, src1);
    src_ptr += src_stride;
    LD_UB2(ref_ptr, 16, ref0, ref1);
    ref_ptr += ref_stride;
    CALC_MSE_AVG_B(src0, ref0, var, avg);
    CALC_MSE_AVG_B(src1, ref1, var, avg);

    LD_UB2(src_ptr, 16, src0, src1);
    src_ptr += src_stride;
    LD_UB2(ref_ptr, 16, ref0, ref1);
    ref_ptr += ref_stride;
    CALC_MSE_AVG_B(src0, ref0, var, avg);
    CALC_MSE_AVG_B(src1, ref1, var, avg);

    LD_UB2(src_ptr, 16, src0, src1);
    src_ptr += src_stride;
    LD_UB2(ref_ptr, 16, ref0, ref1);
    ref_ptr += ref_stride;
    CALC_MSE_AVG_B(src0, ref0, var, avg);
    CALC_MSE_AVG_B(src1, ref1, var, avg);
  }

  vec = __msa_hadd_s_w(avg, avg);
  *diff = HADD_SW_S32(vec);

  return HADD_SW_S32(var);
}
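
/* The fixed-size paths below split the signed-difference accumulator across
 * two or four v8i16 vectors. Each halfword lane then sums at most 128
 * differences of magnitude <= 255 (32640, just inside int16_t range),
 * whereas a single accumulator could reach 65280 and wrap. */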
static uint32_t sse_diff_32x64_msa(const uint8_t *src_ptr, int32_t src_stride,
                                   const uint8_t *ref_ptr, int32_t ref_stride,
                                   int32_t *diff) {
  int32_t ht_cnt;
  v16u8 src0, src1, ref0, ref1;
  v8i16 avg0 = { 0 };
  v8i16 avg1 = { 0 };
  v4i32 vec, var = { 0 };

  for (ht_cnt = 16; ht_cnt--;) {
    LD_UB2(src_ptr, 16, src0, src1);
    src_ptr += src_stride;
    LD_UB2(ref_ptr, 16, ref0, ref1);
    ref_ptr += ref_stride;
    CALC_MSE_AVG_B(src0, ref0, var, avg0);
    CALC_MSE_AVG_B(src1, ref1, var, avg1);

    LD_UB2(src_ptr, 16, src0, src1);
    src_ptr += src_stride;
    LD_UB2(ref_ptr, 16, ref0, ref1);
    ref_ptr += ref_stride;
    CALC_MSE_AVG_B(src0, ref0, var, avg0);
    CALC_MSE_AVG_B(src1, ref1, var, avg1);

    LD_UB2(src_ptr, 16, src0, src1);
    src_ptr += src_stride;
    LD_UB2(ref_ptr, 16, ref0, ref1);
    ref_ptr += ref_stride;
    CALC_MSE_AVG_B(src0, ref0, var, avg0);
    CALC_MSE_AVG_B(src1, ref1, var, avg1);

    LD_UB2(src_ptr, 16, src0, src1);
    src_ptr += src_stride;
    LD_UB2(ref_ptr, 16, ref0, ref1);
    ref_ptr += ref_stride;
    CALC_MSE_AVG_B(src0, ref0, var, avg0);
    CALC_MSE_AVG_B(src1, ref1, var, avg1);
  }

  vec = __msa_hadd_s_w(avg0, avg0);
  vec += __msa_hadd_s_w(avg1, avg1);
  *diff = HADD_SW_S32(vec);

  return HADD_SW_S32(var);
}

static uint32_t sse_diff_64x32_msa(const uint8_t *src_ptr, int32_t src_stride,
                                   const uint8_t *ref_ptr, int32_t ref_stride,
                                   int32_t *diff) {
  int32_t ht_cnt;
  v16u8 src0, src1, src2, src3;
  v16u8 ref0, ref1, ref2, ref3;
  v8i16 avg0 = { 0 };
  v8i16 avg1 = { 0 };
  v4i32 vec, var = { 0 };

  for (ht_cnt = 16; ht_cnt--;) {
    LD_UB4(src_ptr, 16, src0, src1, src2, src3);
    src_ptr += src_stride;
    LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
    ref_ptr += ref_stride;
    CALC_MSE_AVG_B(src0, ref0, var, avg0);
    CALC_MSE_AVG_B(src2, ref2, var, avg0);
    CALC_MSE_AVG_B(src1, ref1, var, avg1);
    CALC_MSE_AVG_B(src3, ref3, var, avg1);

    LD_UB4(src_ptr, 16, src0, src1, src2, src3);
    src_ptr += src_stride;
    LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
    ref_ptr += ref_stride;
    CALC_MSE_AVG_B(src0, ref0, var, avg0);
    CALC_MSE_AVG_B(src2, ref2, var, avg0);
    CALC_MSE_AVG_B(src1, ref1, var, avg1);
    CALC_MSE_AVG_B(src3, ref3, var, avg1);
  }

  vec = __msa_hadd_s_w(avg0, avg0);
  vec += __msa_hadd_s_w(avg1, avg1);
  *diff = HADD_SW_S32(vec);

  return HADD_SW_S32(var);
}

static uint32_t sse_diff_64x64_msa(const uint8_t *src_ptr, int32_t src_stride,
                                   const uint8_t *ref_ptr, int32_t ref_stride,
                                   int32_t *diff) {
  int32_t ht_cnt;
  v16u8 src0, src1, src2, src3;
  v16u8 ref0, ref1, ref2, ref3;
  v8i16 avg0 = { 0 };
  v8i16 avg1 = { 0 };
  v8i16 avg2 = { 0 };
  v8i16 avg3 = { 0 };
  v4i32 vec, var = { 0 };

  for (ht_cnt = 32; ht_cnt--;) {
    LD_UB4(src_ptr, 16, src0, src1, src2, src3);
    src_ptr += src_stride;
    LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
    ref_ptr += ref_stride;
    CALC_MSE_AVG_B(src0, ref0, var, avg0);
    CALC_MSE_AVG_B(src1, ref1, var, avg1);
    CALC_MSE_AVG_B(src2, ref2, var, avg2);
    CALC_MSE_AVG_B(src3, ref3, var, avg3);

    LD_UB4(src_ptr, 16, src0, src1, src2, src3);
    src_ptr += src_stride;
    LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
    ref_ptr += ref_stride;
    CALC_MSE_AVG_B(src0, ref0, var, avg0);
    CALC_MSE_AVG_B(src1, ref1, var, avg1);
    CALC_MSE_AVG_B(src2, ref2, var, avg2);
    CALC_MSE_AVG_B(src3, ref3, var, avg3);
  }

  vec = __msa_hadd_s_w(avg0, avg0);
  vec += __msa_hadd_s_w(avg1, avg1);
  vec += __msa_hadd_s_w(avg2, avg2);
  vec += __msa_hadd_s_w(avg3, avg3);
  *diff = HADD_SW_S32(vec);

  return HADD_SW_S32(var);
}
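
/* Sum of squares over a 16x16 block of int16 coefficients (256 values),
 * accumulated in 64-bit lanes and folded down to a 32-bit result. */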
static uint32_t get_mb_ss_msa(const int16_t *src) {
  uint32_t sum, cnt;
  v8i16 src0, src1, src2, src3;
  v4i32 src0_l, src1_l, src2_l, src3_l;
  v4i32 src0_r, src1_r, src2_r, src3_r;
  v2i64 sq_src_l = { 0 };
  v2i64 sq_src_r = { 0 };

  for (cnt = 8; cnt--;) {
    LD_SH4(src, 8, src0, src1, src2, src3);
    src += 4 * 8;

    UNPCK_SH_SW(src0, src0_l, src0_r);
    UNPCK_SH_SW(src1, src1_l, src1_r);
    UNPCK_SH_SW(src2, src2_l, src2_r);
    UNPCK_SH_SW(src3, src3_l, src3_r);

    DPADD_SD2_SD(src0_l, src0_r, sq_src_l, sq_src_r);
    DPADD_SD2_SD(src1_l, src1_r, sq_src_l, sq_src_r);
    DPADD_SD2_SD(src2_l, src2_r, sq_src_l, sq_src_r);
    DPADD_SD2_SD(src3_l, src3_r, sq_src_l, sq_src_r);
  }

  sq_src_l += __msa_splati_d(sq_src_l, 1);
  sq_src_r += __msa_splati_d(sq_src_r, 1);

  sum = __msa_copy_s_d(sq_src_l, 0);
  sum += __msa_copy_s_d(sq_src_r, 0);

  return sum;
}
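
/* The sse_*width_msa helpers mirror the sse_diff_* functions above but skip
 * the difference-sum bookkeeping; they are used where only the SSE is
 * needed, e.g. by the vpx_mse* entry points below. */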
static uint32_t sse_4width_msa(const uint8_t *src_ptr, int32_t src_stride,
                               const uint8_t *ref_ptr, int32_t ref_stride,
                               int32_t height) {
  int32_t ht_cnt;
  uint32_t src0, src1, src2, src3;
  uint32_t ref0, ref1, ref2, ref3;
  v16u8 src = { 0 };
  v16u8 ref = { 0 };
  v4i32 var = { 0 };

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    LW4(src_ptr, src_stride, src0, src1, src2, src3);
    src_ptr += (4 * src_stride);
    LW4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3);
    ref_ptr += (4 * ref_stride);

    INSERT_W4_UB(src0, src1, src2, src3, src);
    INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
    CALC_MSE_B(src, ref, var);
  }

  return HADD_SW_S32(var);
}

static uint32_t sse_8width_msa(const uint8_t *src_ptr, int32_t src_stride,
                               const uint8_t *ref_ptr, int32_t ref_stride,
                               int32_t height) {
  int32_t ht_cnt;
  v16u8 src0, src1, src2, src3;
  v16u8 ref0, ref1, ref2, ref3;
  v4i32 var = { 0 };

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    LD_UB4(src_ptr, src_stride, src0, src1, src2, src3);
    src_ptr += (4 * src_stride);
    LD_UB4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3);
    ref_ptr += (4 * ref_stride);

    PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, src1,
                ref0, ref1);
    CALC_MSE_B(src0, ref0, var);
    CALC_MSE_B(src1, ref1, var);
  }

  return HADD_SW_S32(var);
}

static uint32_t sse_16width_msa(const uint8_t *src_ptr, int32_t src_stride,
                                const uint8_t *ref_ptr, int32_t ref_stride,
                                int32_t height) {
  int32_t ht_cnt;
  v16u8 src, ref;
  v4i32 var = { 0 };

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    src = LD_UB(src_ptr);
    src_ptr += src_stride;
    ref = LD_UB(ref_ptr);
    ref_ptr += ref_stride;
    CALC_MSE_B(src, ref, var);

    src = LD_UB(src_ptr);
    src_ptr += src_stride;
    ref = LD_UB(ref_ptr);
    ref_ptr += ref_stride;
    CALC_MSE_B(src, ref, var);

    src = LD_UB(src_ptr);
    src_ptr += src_stride;
    ref = LD_UB(ref_ptr);
    ref_ptr += ref_stride;
    CALC_MSE_B(src, ref, var);

    src = LD_UB(src_ptr);
    src_ptr += src_stride;
    ref = LD_UB(ref_ptr);
    ref_ptr += ref_stride;
    CALC_MSE_B(src, ref, var);
  }

  return HADD_SW_S32(var);
}

static uint32_t sse_32width_msa(const uint8_t *src_ptr, int32_t src_stride,
                                const uint8_t *ref_ptr, int32_t ref_stride,
                                int32_t height) {
  int32_t ht_cnt;
  v16u8 src0, src1, ref0, ref1;
  v4i32 var = { 0 };

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    LD_UB2(src_ptr, 16, src0, src1);
    src_ptr += src_stride;
    LD_UB2(ref_ptr, 16, ref0, ref1);
    ref_ptr += ref_stride;
    CALC_MSE_B(src0, ref0, var);
    CALC_MSE_B(src1, ref1, var);

    LD_UB2(src_ptr, 16, src0, src1);
    src_ptr += src_stride;
    LD_UB2(ref_ptr, 16, ref0, ref1);
    ref_ptr += ref_stride;
    CALC_MSE_B(src0, ref0, var);
    CALC_MSE_B(src1, ref1, var);

    LD_UB2(src_ptr, 16, src0, src1);
    src_ptr += src_stride;
    LD_UB2(ref_ptr, 16, ref0, ref1);
    ref_ptr += ref_stride;
    CALC_MSE_B(src0, ref0, var);
    CALC_MSE_B(src1, ref1, var);

    LD_UB2(src_ptr, 16, src0, src1);
    src_ptr += src_stride;
    LD_UB2(ref_ptr, 16, ref0, ref1);
    ref_ptr += ref_stride;
    CALC_MSE_B(src0, ref0, var);
    CALC_MSE_B(src1, ref1, var);
  }

  return HADD_SW_S32(var);
}

static uint32_t sse_64width_msa(const uint8_t *src_ptr, int32_t src_stride,
                                const uint8_t *ref_ptr, int32_t ref_stride,
                                int32_t height) {
  int32_t ht_cnt;
  v16u8 src0, src1, src2, src3;
  v16u8 ref0, ref1, ref2, ref3;
  v4i32 var = { 0 };

  for (ht_cnt = height >> 1; ht_cnt--;) {
    LD_UB4(src_ptr, 16, src0, src1, src2, src3);
    src_ptr += src_stride;
    LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
    ref_ptr += ref_stride;
    CALC_MSE_B(src0, ref0, var);
    CALC_MSE_B(src2, ref2, var);
    CALC_MSE_B(src1, ref1, var);
    CALC_MSE_B(src3, ref3, var);

    LD_UB4(src_ptr, 16, src0, src1, src2, src3);
    src_ptr += src_stride;
    LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
    ref_ptr += ref_stride;
    CALC_MSE_B(src0, ref0, var);
    CALC_MSE_B(src2, ref2, var);
    CALC_MSE_B(src1, ref1, var);
    CALC_MSE_B(src3, ref3, var);
  }

  return HADD_SW_S32(var);
}
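
/* SSE of a single 4x4 block; no difference sum or mean correction here. */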
uint32_t vpx_get4x4sse_cs_msa(const uint8_t *src_ptr, int32_t src_stride,
                              const uint8_t *ref_ptr, int32_t ref_stride) {
  uint32_t src0, src1, src2, src3;
  uint32_t ref0, ref1, ref2, ref3;
  v16i8 src = { 0 };
  v16i8 ref = { 0 };
  v4i32 err0 = { 0 };

  LW4(src_ptr, src_stride, src0, src1, src2, src3);
  LW4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3);
  INSERT_W4_SB(src0, src1, src2, src3, src);
  INSERT_W4_SB(ref0, ref1, ref2, ref3, ref);
  CALC_MSE_B(src, ref, err0);

  return HADD_SW_S32(err0);
}
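
/* Per-block-size shift = log2(width * height). Blocks larger than 16x16 use
 * the 64-bit VARIANCE_LARGE_WxH form to keep sum * sum from overflowing. */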
#define VARIANCE_4Wx4H(sse, diff) VARIANCE_WxH(sse, diff, 4)
#define VARIANCE_4Wx8H(sse, diff) VARIANCE_WxH(sse, diff, 5)
#define VARIANCE_8Wx4H(sse, diff) VARIANCE_WxH(sse, diff, 5)
#define VARIANCE_8Wx8H(sse, diff) VARIANCE_WxH(sse, diff, 6)
#define VARIANCE_8Wx16H(sse, diff) VARIANCE_WxH(sse, diff, 7)
#define VARIANCE_16Wx8H(sse, diff) VARIANCE_WxH(sse, diff, 7)
#define VARIANCE_16Wx16H(sse, diff) VARIANCE_WxH(sse, diff, 8)
#define VARIANCE_16Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 9)
#define VARIANCE_32Wx16H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 9)
#define VARIANCE_32Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 10)
#define VARIANCE_32Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11)
#define VARIANCE_64Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11)
#define VARIANCE_64Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 12)
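
/* Generator for the fixed-size vpx_variance<wd>x<ht>_msa entry points. For
 * example, VPX_VARIANCE_WDXHT_MSA(4, 4) emits vpx_variance4x4_msa(), which
 * fills *sse via sse_diff_4width_msa() and returns
 * *sse - (sum * sum) / 16. */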
#define VPX_VARIANCE_WDXHT_MSA(wd, ht)                                         \
  uint32_t vpx_variance##wd##x##ht##_msa(                                      \
      const uint8_t *src, int32_t src_stride, const uint8_t *ref,              \
      int32_t ref_stride, uint32_t *sse) {                                     \
    int32_t diff;                                                              \
                                                                               \
    *sse =                                                                     \
        sse_diff_##wd##width_msa(src, src_stride, ref, ref_stride, ht, &diff); \
                                                                               \
    return VARIANCE_##wd##Wx##ht##H(*sse, diff);                               \
  }

VPX_VARIANCE_WDXHT_MSA(4, 4)
VPX_VARIANCE_WDXHT_MSA(4, 8)
VPX_VARIANCE_WDXHT_MSA(8, 4)
VPX_VARIANCE_WDXHT_MSA(8, 8)
VPX_VARIANCE_WDXHT_MSA(8, 16)
VPX_VARIANCE_WDXHT_MSA(16, 8)
VPX_VARIANCE_WDXHT_MSA(16, 16)
VPX_VARIANCE_WDXHT_MSA(16, 32)
VPX_VARIANCE_WDXHT_MSA(32, 16)
VPX_VARIANCE_WDXHT_MSA(32, 32)

uint32_t vpx_variance32x64_msa(const uint8_t *src, int32_t src_stride,
                               const uint8_t *ref, int32_t ref_stride,
                               uint32_t *sse) {
  int32_t diff;

  *sse = sse_diff_32x64_msa(src, src_stride, ref, ref_stride, &diff);

  return VARIANCE_32Wx64H(*sse, diff);
}

uint32_t vpx_variance64x32_msa(const uint8_t *src, int32_t src_stride,
                               const uint8_t *ref, int32_t ref_stride,
                               uint32_t *sse) {
  int32_t diff;

  *sse = sse_diff_64x32_msa(src, src_stride, ref, ref_stride, &diff);

  return VARIANCE_64Wx32H(*sse, diff);
}

uint32_t vpx_variance64x64_msa(const uint8_t *src, int32_t src_stride,
                               const uint8_t *ref, int32_t ref_stride,
                               uint32_t *sse) {
  int32_t diff;

  *sse = sse_diff_64x64_msa(src, src_stride, ref, ref_stride, &diff);

  return VARIANCE_64Wx64H(*sse, diff);
}
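
/* MSE entry points: same squared-error accumulation as variance, but the
 * mean correction is skipped, so the raw SSE is both stored to *sse and
 * returned. */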
uint32_t vpx_mse8x8_msa(const uint8_t *src, int32_t src_stride,
                        const uint8_t *ref, int32_t ref_stride,
                        uint32_t *sse) {
  *sse = sse_8width_msa(src, src_stride, ref, ref_stride, 8);

  return *sse;
}

uint32_t vpx_mse8x16_msa(const uint8_t *src, int32_t src_stride,
                         const uint8_t *ref, int32_t ref_stride,
                         uint32_t *sse) {
  *sse = sse_8width_msa(src, src_stride, ref, ref_stride, 16);

  return *sse;
}

uint32_t vpx_mse16x8_msa(const uint8_t *src, int32_t src_stride,
                         const uint8_t *ref, int32_t ref_stride,
                         uint32_t *sse) {
  *sse = sse_16width_msa(src, src_stride, ref, ref_stride, 8);

  return *sse;
}

uint32_t vpx_mse16x16_msa(const uint8_t *src, int32_t src_stride,
                          const uint8_t *ref, int32_t ref_stride,
                          uint32_t *sse) {
  *sse = sse_16width_msa(src, src_stride, ref, ref_stride, 16);

  return *sse;
}
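
/* Return both the SSE and the signed difference sum for one block through
 * output parameters, for callers that need both values at once. */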
void vpx_get8x8var_msa(const uint8_t *src, int32_t src_stride,
                       const uint8_t *ref, int32_t ref_stride, uint32_t *sse,
                       int32_t *sum) {
  *sse = sse_diff_8width_msa(src, src_stride, ref, ref_stride, 8, sum);
}

void vpx_get16x16var_msa(const uint8_t *src, int32_t src_stride,
                         const uint8_t *ref, int32_t ref_stride, uint32_t *sse,
                         int32_t *sum) {
  *sse = sse_diff_16width_msa(src, src_stride, ref, ref_stride, 16, sum);
}

uint32_t vpx_get_mb_ss_msa(const int16_t *src) { return get_mb_ss_msa(src); }