variance_sse2.c
/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#include <emmintrin.h>  // SSE2

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"

#include "vpx_ports/mem.h"
#include "vpx_dsp/x86/mem_sse2.h"

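// Horizontal sum: adds the four 32-bit lanes of val and returns the scalar
// result.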
static INLINE unsigned int add32x4_sse2(__m128i val) {
  val = _mm_add_epi32(val, _mm_srli_si128(val, 8));
  val = _mm_add_epi32(val, _mm_srli_si128(val, 4));
  return _mm_cvtsi128_si32(val);
}

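// Sum of squares over a 16x16 macroblock of 16-bit values: 32 loads of 8
// values, each squared and pair-summed into 32-bit lanes by _mm_madd_epi16.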
unsigned int vpx_get_mb_ss_sse2(const int16_t *src_ptr) {
  __m128i vsum = _mm_setzero_si128();
  int i;

  for (i = 0; i < 32; ++i) {
    const __m128i v = _mm_loadu_si128((const __m128i *)src_ptr);
    vsum = _mm_add_epi32(vsum, _mm_madd_epi16(v, v));
    src_ptr += 8;
  }

  return add32x4_sse2(vsum);
}

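// Loads 4 pixels from each of two consecutive rows and zero-extends them into
// eight 16-bit lanes.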
static INLINE __m128i load4x2_sse2(const uint8_t *const p, const int stride) {
  const __m128i p0 = _mm_cvtsi32_si128(loadu_uint32(p + 0 * stride));
  const __m128i p1 = _mm_cvtsi32_si128(loadu_uint32(p + 1 * stride));
  const __m128i p01 = _mm_unpacklo_epi32(p0, p1);
  return _mm_unpacklo_epi8(p01, _mm_setzero_si128());
}

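// Accumulates one 8-lane chunk of differences: the signed diffs are added
// into *sum as 16-bit lanes and their squares into *sse as 32-bit lanes
// (via _mm_madd_epi16).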
static INLINE void variance_kernel_sse2(const __m128i src_ptr,
                                        const __m128i ref_ptr,
                                        __m128i *const sse,
                                        __m128i *const sum) {
  const __m128i diff = _mm_sub_epi16(src_ptr, ref_ptr);
  *sse = _mm_add_epi32(*sse, _mm_madd_epi16(diff, diff));
  *sum = _mm_add_epi16(*sum, diff);
}

// Can handle 128 pixels' diff sum (such as 8x16 or 16x8)
// Slightly faster than variance_final_256_pel_sse2()
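// Each diff lies in [-255, 255], so a 128-pixel total fits in int16_t
// (128 * 255 = 32640 < 32767), which keeps the 16-bit reduction below exact.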
static INLINE void variance_final_128_pel_sse2(__m128i vsse, __m128i vsum,
                                               unsigned int *const sse,
                                               int *const sum) {
  *sse = add32x4_sse2(vsse);

  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8));
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 4));
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 2));
  *sum = (int16_t)_mm_extract_epi16(vsum, 0);
}

// Can handle 256 pixels' diff sum (such as 16x16)
static INLINE void variance_final_256_pel_sse2(__m128i vsse, __m128i vsum,
                                               unsigned int *const sse,
                                               int *const sum) {
  *sse = add32x4_sse2(vsse);

  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8));
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 4));
  *sum = (int16_t)_mm_extract_epi16(vsum, 0);
  *sum += (int16_t)_mm_extract_epi16(vsum, 1);
}

// Can handle 512 pixels' diff sum (such as 16x32 or 32x16)
static INLINE void variance_final_512_pel_sse2(__m128i vsse, __m128i vsum,
                                               unsigned int *const sse,
                                               int *const sum) {
  *sse = add32x4_sse2(vsse);

  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8));
  vsum = _mm_unpacklo_epi16(vsum, vsum);
  vsum = _mm_srai_epi32(vsum, 16);
  *sum = add32x4_sse2(vsum);
}

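// Sign-extends the eight 16-bit lane sums to 32 bits and folds them into four
// 32-bit lanes, so accumulation can continue beyond the 16-bit limits above.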
static INLINE __m128i sum_to_32bit_sse2(const __m128i sum) {
  const __m128i sum_lo = _mm_srai_epi32(_mm_unpacklo_epi16(sum, sum), 16);
  const __m128i sum_hi = _mm_srai_epi32(_mm_unpackhi_epi16(sum, sum), 16);
  return _mm_add_epi32(sum_lo, sum_hi);
}

// Can handle 1024 pixels' diff sum (such as 32x32)
static INLINE int sum_final_sse2(const __m128i sum) {
  const __m128i t = sum_to_32bit_sse2(sum);
  return add32x4_sse2(t);
}

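// Accumulates sse and sum over an h-row block that is 4 pixels wide,
// processing two rows per iteration.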
static INLINE void variance4_sse2(const uint8_t *src_ptr, const int src_stride,
                                  const uint8_t *ref_ptr, const int ref_stride,
                                  const int h, __m128i *const sse,
                                  __m128i *const sum) {
  int i;

  assert(h <= 256);  // May overflow for larger height.
  *sse = _mm_setzero_si128();
  *sum = _mm_setzero_si128();

  for (i = 0; i < h; i += 2) {
    const __m128i s = load4x2_sse2(src_ptr, src_stride);
    const __m128i r = load4x2_sse2(ref_ptr, ref_stride);

    variance_kernel_sse2(s, r, sse, sum);
    src_ptr += 2 * src_stride;
    ref_ptr += 2 * ref_stride;
  }
}

static INLINE void variance8_sse2(const uint8_t *src_ptr, const int src_stride,
                                  const uint8_t *ref_ptr, const int ref_stride,
                                  const int h, __m128i *const sse,
                                  __m128i *const sum) {
  const __m128i zero = _mm_setzero_si128();
  int i;

  assert(h <= 128);  // May overflow for larger height.
  *sse = _mm_setzero_si128();
  *sum = _mm_setzero_si128();

  for (i = 0; i < h; i++) {
    const __m128i s =
        _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)src_ptr), zero);
    const __m128i r =
        _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)ref_ptr), zero);

    variance_kernel_sse2(s, r, sse, sum);
    src_ptr += src_stride;
    ref_ptr += ref_stride;
  }
}

static INLINE void variance16_kernel_sse2(const uint8_t *const src_ptr,
                                          const uint8_t *const ref_ptr,
                                          __m128i *const sse,
                                          __m128i *const sum) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i s = _mm_loadu_si128((const __m128i *)src_ptr);
  const __m128i r = _mm_loadu_si128((const __m128i *)ref_ptr);
  const __m128i src0 = _mm_unpacklo_epi8(s, zero);
  const __m128i ref0 = _mm_unpacklo_epi8(r, zero);
  const __m128i src1 = _mm_unpackhi_epi8(s, zero);
  const __m128i ref1 = _mm_unpackhi_epi8(r, zero);

  variance_kernel_sse2(src0, ref0, sse, sum);
  variance_kernel_sse2(src1, ref1, sse, sum);
}

static INLINE void variance16_sse2(const uint8_t *src_ptr, const int src_stride,
                                   const uint8_t *ref_ptr, const int ref_stride,
                                   const int h, __m128i *const sse,
                                   __m128i *const sum) {
  int i;

  assert(h <= 64);  // May overflow for larger height.
  *sse = _mm_setzero_si128();
  *sum = _mm_setzero_si128();

  for (i = 0; i < h; ++i) {
    variance16_kernel_sse2(src_ptr, ref_ptr, sse, sum);
    src_ptr += src_stride;
    ref_ptr += ref_stride;
  }
}

static INLINE void variance32_sse2(const uint8_t *src_ptr, const int src_stride,
                                   const uint8_t *ref_ptr, const int ref_stride,
                                   const int h, __m128i *const sse,
                                   __m128i *const sum) {
  int i;

  assert(h <= 32);  // May overflow for larger height.
  // Don't initialize sse here since it's an accumulation.
  *sum = _mm_setzero_si128();

  for (i = 0; i < h; ++i) {
    variance16_kernel_sse2(src_ptr + 0, ref_ptr + 0, sse, sum);
    variance16_kernel_sse2(src_ptr + 16, ref_ptr + 16, sse, sum);
    src_ptr += src_stride;
    ref_ptr += ref_stride;
  }
}

static INLINE void variance64_sse2(const uint8_t *src_ptr, const int src_stride,
                                   const uint8_t *ref_ptr, const int ref_stride,
                                   const int h, __m128i *const sse,
                                   __m128i *const sum) {
  int i;

  assert(h <= 16);  // May overflow for larger height.
  // Don't initialize sse here since it's an accumulation.
  *sum = _mm_setzero_si128();

  for (i = 0; i < h; ++i) {
    variance16_kernel_sse2(src_ptr + 0, ref_ptr + 0, sse, sum);
    variance16_kernel_sse2(src_ptr + 16, ref_ptr + 16, sse, sum);
    variance16_kernel_sse2(src_ptr + 32, ref_ptr + 32, sse, sum);
    variance16_kernel_sse2(src_ptr + 48, ref_ptr + 48, sse, sum);
    src_ptr += src_stride;
    ref_ptr += ref_stride;
  }
}

void vpx_get8x8var_sse2(const uint8_t *src_ptr, int src_stride,
                        const uint8_t *ref_ptr, int ref_stride,
                        unsigned int *sse, int *sum) {
  __m128i vsse, vsum;
  variance8_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 8, &vsse, &vsum);
  variance_final_128_pel_sse2(vsse, vsum, sse, sum);
}

void vpx_get16x16var_sse2(const uint8_t *src_ptr, int src_stride,
                          const uint8_t *ref_ptr, int ref_stride,
                          unsigned int *sse, int *sum) {
  __m128i vsse, vsum;
  variance16_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
  variance_final_256_pel_sse2(vsse, vsum, sse, sum);
}

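// All vpx_varianceWxH_sse2() functions below return
//   variance = sse - sum * sum / (W * H),
// with the division implemented as a right shift by log2(W * H).
// A scalar sketch of the 4x4 case (illustration only, not part of the API):
//
//   static unsigned int var4x4_ref(const uint8_t *s, int ss,
//                                  const uint8_t *r, int rs) {
//     int i, j, sum = 0;
//     unsigned int sse = 0;
//     for (i = 0; i < 4; ++i) {
//       for (j = 0; j < 4; ++j) {
//         const int d = s[i * ss + j] - r[i * rs + j];
//         sum += d;
//         sse += d * d;
//       }
//     }
//     return sse - ((sum * sum) >> 4);
//   }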
unsigned int vpx_variance4x4_sse2(const uint8_t *src_ptr, int src_stride,
                                  const uint8_t *ref_ptr, int ref_stride,
                                  unsigned int *sse) {
  __m128i vsse, vsum;
  int sum;
  variance4_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 4, &vsse, &vsum);
  variance_final_128_pel_sse2(vsse, vsum, sse, &sum);
  return *sse - ((sum * sum) >> 4);
}

unsigned int vpx_variance4x8_sse2(const uint8_t *src_ptr, int src_stride,
                                  const uint8_t *ref_ptr, int ref_stride,
                                  unsigned int *sse) {
  __m128i vsse, vsum;
  int sum;
  variance4_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 8, &vsse, &vsum);
  variance_final_128_pel_sse2(vsse, vsum, sse, &sum);
  return *sse - ((sum * sum) >> 5);
}

unsigned int vpx_variance8x4_sse2(const uint8_t *src_ptr, int src_stride,
                                  const uint8_t *ref_ptr, int ref_stride,
                                  unsigned int *sse) {
  __m128i vsse, vsum;
  int sum;
  variance8_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 4, &vsse, &vsum);
  variance_final_128_pel_sse2(vsse, vsum, sse, &sum);
  return *sse - ((sum * sum) >> 5);
}

unsigned int vpx_variance8x8_sse2(const uint8_t *src_ptr, int src_stride,
                                  const uint8_t *ref_ptr, int ref_stride,
                                  unsigned int *sse) {
  __m128i vsse, vsum;
  int sum;
  variance8_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 8, &vsse, &vsum);
  variance_final_128_pel_sse2(vsse, vsum, sse, &sum);
  return *sse - ((sum * sum) >> 6);
}

unsigned int vpx_variance8x16_sse2(const uint8_t *src_ptr, int src_stride,
                                   const uint8_t *ref_ptr, int ref_stride,
                                   unsigned int *sse) {
  __m128i vsse, vsum;
  int sum;
  variance8_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
  variance_final_128_pel_sse2(vsse, vsum, sse, &sum);
  return *sse - ((sum * sum) >> 7);
}

unsigned int vpx_variance16x8_sse2(const uint8_t *src_ptr, int src_stride,
                                   const uint8_t *ref_ptr, int ref_stride,
                                   unsigned int *sse) {
  __m128i vsse, vsum;
  int sum;
  variance16_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 8, &vsse, &vsum);
  variance_final_128_pel_sse2(vsse, vsum, sse, &sum);
  return *sse - ((sum * sum) >> 7);
}

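// From 256 pixels upwards |sum| can reach 256 * 255 = 65280, so sum * sum no
// longer fits in a signed 32-bit int; the product is formed in 64 bits before
// the shift.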
unsigned int vpx_variance16x16_sse2(const uint8_t *src_ptr, int src_stride,
                                    const uint8_t *ref_ptr, int ref_stride,
                                    unsigned int *sse) {
  __m128i vsse, vsum;
  int sum;
  variance16_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
  variance_final_256_pel_sse2(vsse, vsum, sse, &sum);
  return *sse - (uint32_t)(((int64_t)sum * sum) >> 8);
}

unsigned int vpx_variance16x32_sse2(const uint8_t *src_ptr, int src_stride,
                                    const uint8_t *ref_ptr, int ref_stride,
                                    unsigned int *sse) {
  __m128i vsse, vsum;
  int sum;
  variance16_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 32, &vsse, &vsum);
  variance_final_512_pel_sse2(vsse, vsum, sse, &sum);
  return *sse - (unsigned int)(((int64_t)sum * sum) >> 9);
}

unsigned int vpx_variance32x16_sse2(const uint8_t *src_ptr, int src_stride,
                                    const uint8_t *ref_ptr, int ref_stride,
                                    unsigned int *sse) {
  __m128i vsse = _mm_setzero_si128();
  __m128i vsum;
  int sum;
  variance32_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
  variance_final_512_pel_sse2(vsse, vsum, sse, &sum);
  return *sse - (unsigned int)(((int64_t)sum * sum) >> 9);
}

unsigned int vpx_variance32x32_sse2(const uint8_t *src_ptr, int src_stride,
                                    const uint8_t *ref_ptr, int ref_stride,
                                    unsigned int *sse) {
  __m128i vsse = _mm_setzero_si128();
  __m128i vsum;
  int sum;
  variance32_sse2(src_ptr, src_stride, ref_ptr, ref_stride, 32, &vsse, &vsum);
  *sse = add32x4_sse2(vsse);
  sum = sum_final_sse2(vsum);
  return *sse - (unsigned int)(((int64_t)sum * sum) >> 10);
}

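// For 32x64, 64x32 and 64x64 the block is processed in chunks (32 or 16 rows
// at a time): each chunk's 16-bit diff sums are widened to 32 bits before
// being added to the running total, so the per-chunk limits above are never
// exceeded, while vsse keeps accumulating across chunks.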
unsigned int vpx_variance32x64_sse2(const uint8_t *src_ptr, int src_stride,
                                    const uint8_t *ref_ptr, int ref_stride,
                                    unsigned int *sse) {
  __m128i vsse = _mm_setzero_si128();
  __m128i vsum = _mm_setzero_si128();
  int sum;
  int i = 0;

  for (i = 0; i < 2; i++) {
    __m128i vsum16;
    variance32_sse2(src_ptr + 32 * i * src_stride, src_stride,
                    ref_ptr + 32 * i * ref_stride, ref_stride, 32, &vsse,
                    &vsum16);
    vsum = _mm_add_epi32(vsum, sum_to_32bit_sse2(vsum16));
  }
  *sse = add32x4_sse2(vsse);
  sum = add32x4_sse2(vsum);
  return *sse - (unsigned int)(((int64_t)sum * sum) >> 11);
}

unsigned int vpx_variance64x32_sse2(const uint8_t *src_ptr, int src_stride,
                                    const uint8_t *ref_ptr, int ref_stride,
                                    unsigned int *sse) {
  __m128i vsse = _mm_setzero_si128();
  __m128i vsum = _mm_setzero_si128();
  int sum;
  int i = 0;

  for (i = 0; i < 2; i++) {
    __m128i vsum16;
    variance64_sse2(src_ptr + 16 * i * src_stride, src_stride,
                    ref_ptr + 16 * i * ref_stride, ref_stride, 16, &vsse,
                    &vsum16);
    vsum = _mm_add_epi32(vsum, sum_to_32bit_sse2(vsum16));
  }
  *sse = add32x4_sse2(vsse);
  sum = add32x4_sse2(vsum);
  return *sse - (unsigned int)(((int64_t)sum * sum) >> 11);
}

unsigned int vpx_variance64x64_sse2(const uint8_t *src_ptr, int src_stride,
                                    const uint8_t *ref_ptr, int ref_stride,
                                    unsigned int *sse) {
  __m128i vsse = _mm_setzero_si128();
  __m128i vsum = _mm_setzero_si128();
  int sum;
  int i = 0;

  for (i = 0; i < 4; i++) {
    __m128i vsum16;
    variance64_sse2(src_ptr + 16 * i * src_stride, src_stride,
                    ref_ptr + 16 * i * ref_stride, ref_stride, 16, &vsse,
                    &vsum16);
    vsum = _mm_add_epi32(vsum, sum_to_32bit_sse2(vsum16));
  }
  *sse = add32x4_sse2(vsse);
  sum = add32x4_sse2(vsum);
  return *sse - (unsigned int)(((int64_t)sum * sum) >> 12);
}

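// MSE needs only the sum of squared differences, so the vpx_mse* wrappers
// reuse the variance functions and return *sse, discarding the mean
// correction.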
unsigned int vpx_mse8x8_sse2(const uint8_t *src_ptr, int src_stride,
                             const uint8_t *ref_ptr, int ref_stride,
                             unsigned int *sse) {
  vpx_variance8x8_sse2(src_ptr, src_stride, ref_ptr, ref_stride, sse);
  return *sse;
}

unsigned int vpx_mse8x16_sse2(const uint8_t *src_ptr, int src_stride,
                              const uint8_t *ref_ptr, int ref_stride,
                              unsigned int *sse) {
  vpx_variance8x16_sse2(src_ptr, src_stride, ref_ptr, ref_stride, sse);
  return *sse;
}

unsigned int vpx_mse16x8_sse2(const uint8_t *src_ptr, int src_stride,
                              const uint8_t *ref_ptr, int ref_stride,
                              unsigned int *sse) {
  vpx_variance16x8_sse2(src_ptr, src_stride, ref_ptr, ref_stride, sse);
  return *sse;
}

unsigned int vpx_mse16x16_sse2(const uint8_t *src_ptr, int src_stride,
                               const uint8_t *ref_ptr, int ref_stride,
                               unsigned int *sse) {
  vpx_variance16x16_sse2(src_ptr, src_stride, ref_ptr, ref_stride, sse);
  return *sse;
}

// The 2 unused parameters are place holders for PIC enabled build.
// These definitions are for functions defined in subpel_variance.asm
#define DECL(w, opt)                                                          \
  int vpx_sub_pixel_variance##w##xh_##opt(                                    \
      const uint8_t *src_ptr, ptrdiff_t src_stride, int x_offset,             \
      int y_offset, const uint8_t *ref_ptr, ptrdiff_t ref_stride, int height, \
      unsigned int *sse, void *unused0, void *unused)
#define DECLS(opt1, opt2) \
  DECL(4, opt1);          \
  DECL(8, opt1);          \
  DECL(16, opt1)

DECLS(sse2, sse2);
DECLS(ssse3, ssse3);
#undef DECLS
#undef DECL

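// FN() builds the exported sub-pixel variance functions on top of the w-wide
// assembly kernels declared above: blocks wider than the kernel width are
// processed as additional 16-pixel-wide columns whose se/sse results are
// accumulated, and the result is sse - se * se / (w * h), computed with a
// right shift by wlog2 + hlog2.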
#define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast)                  \
  unsigned int vpx_sub_pixel_variance##w##x##h##_##opt(                   \
      const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
      const uint8_t *ref_ptr, int ref_stride, unsigned int *sse) {        \
    unsigned int sse_tmp;                                                 \
    int se = vpx_sub_pixel_variance##wf##xh_##opt(                        \
        src_ptr, src_stride, x_offset, y_offset, ref_ptr, ref_stride, h,  \
        &sse_tmp, NULL, NULL);                                            \
    if (w > wf) {                                                         \
      unsigned int sse2;                                                  \
      int se2 = vpx_sub_pixel_variance##wf##xh_##opt(                     \
          src_ptr + 16, src_stride, x_offset, y_offset, ref_ptr + 16,     \
          ref_stride, h, &sse2, NULL, NULL);                              \
      se += se2;                                                          \
      sse_tmp += sse2;                                                    \
      if (w > wf * 2) {                                                   \
        se2 = vpx_sub_pixel_variance##wf##xh_##opt(                       \
            src_ptr + 32, src_stride, x_offset, y_offset, ref_ptr + 32,   \
            ref_stride, h, &sse2, NULL, NULL);                            \
        se += se2;                                                        \
        sse_tmp += sse2;                                                  \
        se2 = vpx_sub_pixel_variance##wf##xh_##opt(                       \
            src_ptr + 48, src_stride, x_offset, y_offset, ref_ptr + 48,   \
            ref_stride, h, &sse2, NULL, NULL);                            \
        se += se2;                                                        \
        sse_tmp += sse2;                                                  \
      }                                                                   \
    }                                                                     \
    *sse = sse_tmp;                                                       \
    return sse_tmp -                                                      \
           (unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2));    \
  }

#define FNS(opt1, opt2)                              \
  FN(64, 64, 16, 6, 6, opt1, (int64_t), (int64_t));  \
  FN(64, 32, 16, 6, 5, opt1, (int64_t), (int64_t));  \
  FN(32, 64, 16, 5, 6, opt1, (int64_t), (int64_t));  \
  FN(32, 32, 16, 5, 5, opt1, (int64_t), (int64_t));  \
  FN(32, 16, 16, 5, 4, opt1, (int64_t), (int64_t));  \
  FN(16, 32, 16, 4, 5, opt1, (int64_t), (int64_t));  \
  FN(16, 16, 16, 4, 4, opt1, (uint32_t), (int64_t)); \
  FN(16, 8, 16, 4, 3, opt1, (int32_t), (int32_t));   \
  FN(8, 16, 8, 3, 4, opt1, (int32_t), (int32_t));    \
  FN(8, 8, 8, 3, 3, opt1, (int32_t), (int32_t));     \
  FN(8, 4, 8, 3, 2, opt1, (int32_t), (int32_t));     \
  FN(4, 8, 4, 2, 3, opt1, (int32_t), (int32_t));     \
  FN(4, 4, 4, 2, 2, opt1, (int32_t), (int32_t))

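// For example, the FNS(sse2, sse2) invocation below defines
// vpx_sub_pixel_variance16x8_sse2(), which calls
// vpx_sub_pixel_variance16xh_sse2() once with h = 8 and shifts se * se right
// by 4 + 3 = 7 bits.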
FNS(sse2, sse2);
FNS(ssse3, ssse3);
#undef FNS
#undef FN

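// The avg variants below follow the same column-splitting pattern, but each
// assembly kernel also takes a second predictor (second_pred, passed with
// stride w) that the kernels average into the prediction before the
// differences are computed.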
// The 2 unused parameters are place holders for PIC enabled build.
#define DECL(w, opt)                                                    \
  int vpx_sub_pixel_avg_variance##w##xh_##opt(                          \
      const uint8_t *src_ptr, ptrdiff_t src_stride, int x_offset,       \
      int y_offset, const uint8_t *ref_ptr, ptrdiff_t ref_stride,       \
      const uint8_t *second_pred, ptrdiff_t second_stride, int height,  \
      unsigned int *sse, void *unused0, void *unused)
#define DECLS(opt1, opt2) \
  DECL(4, opt1);          \
  DECL(8, opt1);          \
  DECL(16, opt1)

DECLS(sse2, sse2);
DECLS(ssse3, ssse3);
#undef DECL
#undef DECLS

#define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast)                  \
  unsigned int vpx_sub_pixel_avg_variance##w##x##h##_##opt(               \
      const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset, \
      const uint8_t *ref_ptr, int ref_stride, unsigned int *sse,          \
      const uint8_t *second_pred) {                                       \
    unsigned int sse_tmp;                                                 \
    int se = vpx_sub_pixel_avg_variance##wf##xh_##opt(                    \
        src_ptr, src_stride, x_offset, y_offset, ref_ptr, ref_stride,     \
        second_pred, w, h, &sse_tmp, NULL, NULL);                         \
    if (w > wf) {                                                         \
      unsigned int sse2;                                                  \
      int se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt(                 \
          src_ptr + 16, src_stride, x_offset, y_offset, ref_ptr + 16,     \
          ref_stride, second_pred + 16, w, h, &sse2, NULL, NULL);         \
      se += se2;                                                          \
      sse_tmp += sse2;                                                    \
      if (w > wf * 2) {                                                   \
        se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt(                   \
            src_ptr + 32, src_stride, x_offset, y_offset, ref_ptr + 32,   \
            ref_stride, second_pred + 32, w, h, &sse2, NULL, NULL);       \
        se += se2;                                                        \
        sse_tmp += sse2;                                                  \
        se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt(                   \
            src_ptr + 48, src_stride, x_offset, y_offset, ref_ptr + 48,   \
            ref_stride, second_pred + 48, w, h, &sse2, NULL, NULL);       \
        se += se2;                                                        \
        sse_tmp += sse2;                                                  \
      }                                                                   \
    }                                                                     \
    *sse = sse_tmp;                                                       \
    return sse_tmp -                                                      \
           (unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2));    \
  }

#define FNS(opt1, opt2)                              \
  FN(64, 64, 16, 6, 6, opt1, (int64_t), (int64_t));  \
  FN(64, 32, 16, 6, 5, opt1, (int64_t), (int64_t));  \
  FN(32, 64, 16, 5, 6, opt1, (int64_t), (int64_t));  \
  FN(32, 32, 16, 5, 5, opt1, (int64_t), (int64_t));  \
  FN(32, 16, 16, 5, 4, opt1, (int64_t), (int64_t));  \
  FN(16, 32, 16, 4, 5, opt1, (int64_t), (int64_t));  \
  FN(16, 16, 16, 4, 4, opt1, (uint32_t), (int64_t)); \
  FN(16, 8, 16, 4, 3, opt1, (uint32_t), (int32_t));  \
  FN(8, 16, 8, 3, 4, opt1, (uint32_t), (int32_t));   \
  FN(8, 8, 8, 3, 3, opt1, (uint32_t), (int32_t));    \
  FN(8, 4, 8, 3, 2, opt1, (uint32_t), (int32_t));    \
  FN(4, 8, 4, 2, 3, opt1, (uint32_t), (int32_t));    \
  FN(4, 4, 4, 2, 2, opt1, (uint32_t), (int32_t))

FNS(sse2, sse);
FNS(ssse3, ssse3);
#undef FNS
#undef FN