/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <tmmintrin.h>

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"

// -----------------------------------------------------------------------------
/*
; ------------------------------------------
; input: x, y, z, result
;
; trick from pascal
; (x+2y+z+2)>>2 can be calculated as:
; result = avg(x,z)
; result -= xor(x,z) & 1
; result = avg(result,y)
; ------------------------------------------
*/
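// Worked example: x = 3, y = 5, z = 8 gives (3 + 2 * 5 + 8 + 2) >> 2 = 5.
// avg(3, 8) rounds up to 6; xor(3, 8) & 1 = 1 flags that x + z is odd, and
// subtracting it leaves floor((3 + 8) / 2) = 5; avg(5, 5) = 5, as required.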
static INLINE __m128i avg3_epu16(const __m128i *x, const __m128i *y,
                                 const __m128i *z) {
  const __m128i one = _mm_set1_epi16(1);
  const __m128i a = _mm_avg_epu16(*x, *z);
  const __m128i b =
      _mm_subs_epu16(a, _mm_and_si128(_mm_xor_si128(*x, *z), one));
  return _mm_avg_epu16(b, *y);
}

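// D45: diagonal prediction down and to the left, built from the above row
// only. Output row r, column c takes the 3-tap filtered above pixel at
// index r + c, so each row is the previous row advanced by one pixel.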
void vpx_highbd_d45_predictor_4x4_ssse3(uint16_t *dst, ptrdiff_t stride,
                                        const uint16_t *above,
                                        const uint16_t *left, int bd) {
  const __m128i ABCDEFGH = _mm_loadu_si128((const __m128i *)above);
  const __m128i BCDEFGH0 = _mm_srli_si128(ABCDEFGH, 2);
  const __m128i CDEFGH00 = _mm_srli_si128(ABCDEFGH, 4);
  const __m128i avg3 = avg3_epu16(&ABCDEFGH, &BCDEFGH0, &CDEFGH00);
  (void)left;
  (void)bd;
  _mm_storel_epi64((__m128i *)dst, avg3);
  dst += stride;
  _mm_storel_epi64((__m128i *)dst, _mm_srli_si128(avg3, 2));
  dst += stride;
  _mm_storel_epi64((__m128i *)dst, _mm_srli_si128(avg3, 4));
  dst += stride;
  _mm_storel_epi64((__m128i *)dst, _mm_srli_si128(avg3, 6));
  dst[3] = above[7];  // aka H
}

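// Advances the running row by one pixel (dropping the first element and
// appending the replicated above-right pixel from *ar), stores it, and
// steps dst down one row.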
static INLINE void d45_store_8(uint16_t **dst, const ptrdiff_t stride,
                               __m128i *row, const __m128i *ar) {
  *row = _mm_alignr_epi8(*ar, *row, 2);
  _mm_store_si128((__m128i *)*dst, *row);
  *dst += stride;
}

void vpx_highbd_d45_predictor_8x8_ssse3(uint16_t *dst, ptrdiff_t stride,
                                        const uint16_t *above,
                                        const uint16_t *left, int bd) {
  const __m128i ABCDEFGH = _mm_load_si128((const __m128i *)above);
  const __m128i ABCDHHHH = _mm_shufflehi_epi16(ABCDEFGH, 0xff);
  const __m128i HHHHHHHH = _mm_unpackhi_epi64(ABCDHHHH, ABCDHHHH);
  const __m128i BCDEFGHH = _mm_alignr_epi8(HHHHHHHH, ABCDEFGH, 2);
  const __m128i CDEFGHHH = _mm_alignr_epi8(HHHHHHHH, ABCDEFGH, 4);
  __m128i avg3 = avg3_epu16(&ABCDEFGH, &BCDEFGHH, &CDEFGHHH);
  (void)left;
  (void)bd;
  _mm_store_si128((__m128i *)dst, avg3);
  dst += stride;
  d45_store_8(&dst, stride, &avg3, &HHHHHHHH);
  d45_store_8(&dst, stride, &avg3, &HHHHHHHH);
  d45_store_8(&dst, stride, &avg3, &HHHHHHHH);
  d45_store_8(&dst, stride, &avg3, &HHHHHHHH);
  d45_store_8(&dst, stride, &avg3, &HHHHHHHH);
  d45_store_8(&dst, stride, &avg3, &HHHHHHHH);
  d45_store_8(&dst, stride, &avg3, &HHHHHHHH);
}

static INLINE void d45_store_16(uint16_t **dst, const ptrdiff_t stride,
                                __m128i *row_0, __m128i *row_1,
                                const __m128i *ar) {
  *row_0 = _mm_alignr_epi8(*row_1, *row_0, 2);
  *row_1 = _mm_alignr_epi8(*ar, *row_1, 2);
  _mm_store_si128((__m128i *)*dst, *row_0);
  _mm_store_si128((__m128i *)(*dst + 8), *row_1);
  *dst += stride;
}

void vpx_highbd_d45_predictor_16x16_ssse3(uint16_t *dst, ptrdiff_t stride,
                                          const uint16_t *above,
                                          const uint16_t *left, int bd) {
  const __m128i A0 = _mm_load_si128((const __m128i *)above);
  const __m128i A1 = _mm_load_si128((const __m128i *)(above + 8));
  const __m128i AR0 = _mm_shufflehi_epi16(A1, 0xff);
  const __m128i AR = _mm_unpackhi_epi64(AR0, AR0);
  const __m128i B0 = _mm_alignr_epi8(A1, A0, 2);
  const __m128i B1 = _mm_alignr_epi8(AR, A1, 2);
  const __m128i C0 = _mm_alignr_epi8(A1, A0, 4);
  const __m128i C1 = _mm_alignr_epi8(AR, A1, 4);
  __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  (void)left;
  (void)bd;
  _mm_store_si128((__m128i *)dst, avg3_0);
  _mm_store_si128((__m128i *)(dst + 8), avg3_1);
  dst += stride;
  d45_store_16(&dst, stride, &avg3_0, &avg3_1, &AR);
  d45_store_16(&dst, stride, &avg3_0, &avg3_1, &AR);
  d45_store_16(&dst, stride, &avg3_0, &avg3_1, &AR);
  d45_store_16(&dst, stride, &avg3_0, &avg3_1, &AR);
  d45_store_16(&dst, stride, &avg3_0, &avg3_1, &AR);
  d45_store_16(&dst, stride, &avg3_0, &avg3_1, &AR);
  d45_store_16(&dst, stride, &avg3_0, &avg3_1, &AR);
  d45_store_16(&dst, stride, &avg3_0, &avg3_1, &AR);
  d45_store_16(&dst, stride, &avg3_0, &avg3_1, &AR);
  d45_store_16(&dst, stride, &avg3_0, &avg3_1, &AR);
  d45_store_16(&dst, stride, &avg3_0, &avg3_1, &AR);
  d45_store_16(&dst, stride, &avg3_0, &avg3_1, &AR);
  d45_store_16(&dst, stride, &avg3_0, &avg3_1, &AR);
  d45_store_16(&dst, stride, &avg3_0, &avg3_1, &AR);
  d45_store_16(&dst, stride, &avg3_0, &avg3_1, &AR);
}

void vpx_highbd_d45_predictor_32x32_ssse3(uint16_t *dst, ptrdiff_t stride,
                                          const uint16_t *above,
                                          const uint16_t *left, int bd) {
  const __m128i A0 = _mm_load_si128((const __m128i *)above);
  const __m128i A1 = _mm_load_si128((const __m128i *)(above + 8));
  const __m128i A2 = _mm_load_si128((const __m128i *)(above + 16));
  const __m128i A3 = _mm_load_si128((const __m128i *)(above + 24));
  const __m128i AR0 = _mm_shufflehi_epi16(A3, 0xff);
  const __m128i AR = _mm_unpackhi_epi64(AR0, AR0);
  const __m128i B0 = _mm_alignr_epi8(A1, A0, 2);
  const __m128i B1 = _mm_alignr_epi8(A2, A1, 2);
  const __m128i B2 = _mm_alignr_epi8(A3, A2, 2);
  const __m128i B3 = _mm_alignr_epi8(AR, A3, 2);
  const __m128i C0 = _mm_alignr_epi8(A1, A0, 4);
  const __m128i C1 = _mm_alignr_epi8(A2, A1, 4);
  const __m128i C2 = _mm_alignr_epi8(A3, A2, 4);
  const __m128i C3 = _mm_alignr_epi8(AR, A3, 4);
  __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  __m128i avg3_2 = avg3_epu16(&A2, &B2, &C2);
  __m128i avg3_3 = avg3_epu16(&A3, &B3, &C3);
  int i;
  (void)left;
  (void)bd;
  _mm_store_si128((__m128i *)dst, avg3_0);
  _mm_store_si128((__m128i *)(dst + 8), avg3_1);
  _mm_store_si128((__m128i *)(dst + 16), avg3_2);
  _mm_store_si128((__m128i *)(dst + 24), avg3_3);
  dst += stride;
  for (i = 1; i < 32; ++i) {
    avg3_0 = _mm_alignr_epi8(avg3_1, avg3_0, 2);
    avg3_1 = _mm_alignr_epi8(avg3_2, avg3_1, 2);
    avg3_2 = _mm_alignr_epi8(avg3_3, avg3_2, 2);
    avg3_3 = _mm_alignr_epi8(AR, avg3_3, 2);
    _mm_store_si128((__m128i *)dst, avg3_0);
    _mm_store_si128((__m128i *)(dst + 8), avg3_1);
    _mm_store_si128((__m128i *)(dst + 16), avg3_2);
    _mm_store_si128((__m128i *)(dst + 24), avg3_3);
    dst += stride;
  }
}

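// pshufb mask that rotates the eight 16-bit lanes of a register by one
// position toward the low end: lane 1 moves to lane 0 and lane 0 wraps
// around to lane 7. rotr_epu16() below applies it in place and returns the
// rotated value, which feeds left-column pixels into a row one at a time.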
DECLARE_ALIGNED(16, static const uint8_t,
                rotate_right_epu16[16]) = { 2,  3,  4,  5,  6,  7,  8, 9,
                                            10, 11, 12, 13, 14, 15, 0, 1 };

static INLINE __m128i rotr_epu16(__m128i *a, const __m128i *rotrw) {
  *a = _mm_shuffle_epi8(*a, *rotrw);
  return *a;
}

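// D117: prediction direction between vertical and the 135-degree diagonal.
// Even rows hold 2-tap averages of the above row and odd rows hold 3-tap
// averages; every pair of rows then shifts right by one pixel, prepending a
// 3-tap filtered left-column pixel supplied through the rotate mask above.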
void vpx_highbd_d117_predictor_8x8_ssse3(uint16_t *dst, ptrdiff_t stride,
                                         const uint16_t *above,
                                         const uint16_t *left, int bd) {
  const __m128i rotrw = _mm_load_si128((const __m128i *)rotate_right_epu16);
  const __m128i XABCDEFG = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i ABCDEFGH = _mm_load_si128((const __m128i *)above);
  const __m128i IJKLMNOP = _mm_load_si128((const __m128i *)left);
  const __m128i IXABCDEF =
      _mm_alignr_epi8(XABCDEFG, _mm_slli_si128(IJKLMNOP, 14), 14);
  const __m128i avg3 = avg3_epu16(&ABCDEFGH, &XABCDEFG, &IXABCDEF);
  const __m128i avg2 = _mm_avg_epu16(ABCDEFGH, XABCDEFG);
  const __m128i XIJKLMNO =
      _mm_alignr_epi8(IJKLMNOP, _mm_slli_si128(XABCDEFG, 14), 14);
  const __m128i JKLMNOP0 = _mm_srli_si128(IJKLMNOP, 2);
  __m128i avg3_left = avg3_epu16(&XIJKLMNO, &IJKLMNOP, &JKLMNOP0);
  __m128i rowa = avg2;
  __m128i rowb = avg3;
  int i;
  (void)bd;
  for (i = 0; i < 8; i += 2) {
    _mm_store_si128((__m128i *)dst, rowa);
    dst += stride;
    _mm_store_si128((__m128i *)dst, rowb);
    dst += stride;
    rowa = _mm_alignr_epi8(rowa, rotr_epu16(&avg3_left, &rotrw), 14);
    rowb = _mm_alignr_epi8(rowb, rotr_epu16(&avg3_left, &rotrw), 14);
  }
}

void vpx_highbd_d117_predictor_16x16_ssse3(uint16_t *dst, ptrdiff_t stride,
                                           const uint16_t *above,
                                           const uint16_t *left, int bd) {
  const __m128i rotrw = _mm_load_si128((const __m128i *)rotate_right_epu16);
  const __m128i B0 = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i A0 = _mm_load_si128((const __m128i *)above);
  const __m128i B1 = _mm_loadu_si128((const __m128i *)(above + 7));
  const __m128i A1 = _mm_load_si128((const __m128i *)(above + 8));
  const __m128i avg2_0 = _mm_avg_epu16(A0, B0);
  const __m128i avg2_1 = _mm_avg_epu16(A1, B1);
  const __m128i L0 = _mm_load_si128((const __m128i *)left);
  const __m128i L1 = _mm_load_si128((const __m128i *)(left + 8));
  const __m128i C0 = _mm_alignr_epi8(B0, _mm_slli_si128(L0, 14), 14);
  const __m128i C1 = _mm_alignr_epi8(B1, B0, 14);
  const __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  const __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  const __m128i XL0 = _mm_alignr_epi8(L0, _mm_slli_si128(B0, 14), 14);
  const __m128i XL1 = _mm_alignr_epi8(L1, L0, 14);
  const __m128i L0_ = _mm_alignr_epi8(L1, L0, 2);
  const __m128i L1_ = _mm_srli_si128(L1, 2);
  __m128i rowa_0 = avg2_0;
  __m128i rowa_1 = avg2_1;
  __m128i rowb_0 = avg3_0;
  __m128i rowb_1 = avg3_1;
  __m128i avg3_left[2];
  int i, j;
  (void)bd;
  avg3_left[0] = avg3_epu16(&XL0, &L0, &L0_);
  avg3_left[1] = avg3_epu16(&XL1, &L1, &L1_);
  for (i = 0; i < 2; ++i) {
    __m128i avg_left = avg3_left[i];
    for (j = 0; j < 8; j += 2) {
      _mm_store_si128((__m128i *)dst, rowa_0);
      _mm_store_si128((__m128i *)(dst + 8), rowa_1);
      dst += stride;
      _mm_store_si128((__m128i *)dst, rowb_0);
      _mm_store_si128((__m128i *)(dst + 8), rowb_1);
      dst += stride;
      rowa_1 = _mm_alignr_epi8(rowa_1, rowa_0, 14);
      rowa_0 = _mm_alignr_epi8(rowa_0, rotr_epu16(&avg_left, &rotrw), 14);
      rowb_1 = _mm_alignr_epi8(rowb_1, rowb_0, 14);
      rowb_0 = _mm_alignr_epi8(rowb_0, rotr_epu16(&avg_left, &rotrw), 14);
    }
  }
}

void vpx_highbd_d117_predictor_32x32_ssse3(uint16_t *dst, ptrdiff_t stride,
                                           const uint16_t *above,
                                           const uint16_t *left, int bd) {
  const __m128i rotrw = _mm_load_si128((const __m128i *)rotate_right_epu16);
  const __m128i A0 = _mm_load_si128((const __m128i *)above);
  const __m128i A1 = _mm_load_si128((const __m128i *)(above + 8));
  const __m128i A2 = _mm_load_si128((const __m128i *)(above + 16));
  const __m128i A3 = _mm_load_si128((const __m128i *)(above + 24));
  const __m128i B0 = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i B1 = _mm_loadu_si128((const __m128i *)(above + 7));
  const __m128i B2 = _mm_loadu_si128((const __m128i *)(above + 15));
  const __m128i B3 = _mm_loadu_si128((const __m128i *)(above + 23));
  const __m128i avg2_0 = _mm_avg_epu16(A0, B0);
  const __m128i avg2_1 = _mm_avg_epu16(A1, B1);
  const __m128i avg2_2 = _mm_avg_epu16(A2, B2);
  const __m128i avg2_3 = _mm_avg_epu16(A3, B3);
  const __m128i L0 = _mm_load_si128((const __m128i *)left);
  const __m128i L1 = _mm_load_si128((const __m128i *)(left + 8));
  const __m128i L2 = _mm_load_si128((const __m128i *)(left + 16));
  const __m128i L3 = _mm_load_si128((const __m128i *)(left + 24));
  const __m128i C0 = _mm_alignr_epi8(B0, _mm_slli_si128(L0, 14), 14);
  const __m128i C1 = _mm_alignr_epi8(B1, B0, 14);
  const __m128i C2 = _mm_alignr_epi8(B2, B1, 14);
  const __m128i C3 = _mm_alignr_epi8(B3, B2, 14);
  const __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  const __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  const __m128i avg3_2 = avg3_epu16(&A2, &B2, &C2);
  const __m128i avg3_3 = avg3_epu16(&A3, &B3, &C3);
  const __m128i XL0 = _mm_alignr_epi8(L0, _mm_slli_si128(B0, 14), 14);
  const __m128i XL1 = _mm_alignr_epi8(L1, L0, 14);
  const __m128i XL2 = _mm_alignr_epi8(L2, L1, 14);
  const __m128i XL3 = _mm_alignr_epi8(L3, L2, 14);
  const __m128i L0_ = _mm_alignr_epi8(L1, L0, 2);
  const __m128i L1_ = _mm_alignr_epi8(L2, L1, 2);
  const __m128i L2_ = _mm_alignr_epi8(L3, L2, 2);
  const __m128i L3_ = _mm_srli_si128(L3, 2);
  __m128i rowa_0 = avg2_0;
  __m128i rowa_1 = avg2_1;
  __m128i rowa_2 = avg2_2;
  __m128i rowa_3 = avg2_3;
  __m128i rowb_0 = avg3_0;
  __m128i rowb_1 = avg3_1;
  __m128i rowb_2 = avg3_2;
  __m128i rowb_3 = avg3_3;
  __m128i avg3_left[4];
  int i, j;
  (void)bd;
  avg3_left[0] = avg3_epu16(&XL0, &L0, &L0_);
  avg3_left[1] = avg3_epu16(&XL1, &L1, &L1_);
  avg3_left[2] = avg3_epu16(&XL2, &L2, &L2_);
  avg3_left[3] = avg3_epu16(&XL3, &L3, &L3_);
  for (i = 0; i < 4; ++i) {
    __m128i avg_left = avg3_left[i];
    for (j = 0; j < 8; j += 2) {
      _mm_store_si128((__m128i *)dst, rowa_0);
      _mm_store_si128((__m128i *)(dst + 8), rowa_1);
      _mm_store_si128((__m128i *)(dst + 16), rowa_2);
      _mm_store_si128((__m128i *)(dst + 24), rowa_3);
      dst += stride;
      _mm_store_si128((__m128i *)dst, rowb_0);
      _mm_store_si128((__m128i *)(dst + 8), rowb_1);
      _mm_store_si128((__m128i *)(dst + 16), rowb_2);
      _mm_store_si128((__m128i *)(dst + 24), rowb_3);
      dst += stride;
      rowa_3 = _mm_alignr_epi8(rowa_3, rowa_2, 14);
      rowa_2 = _mm_alignr_epi8(rowa_2, rowa_1, 14);
      rowa_1 = _mm_alignr_epi8(rowa_1, rowa_0, 14);
      rowa_0 = _mm_alignr_epi8(rowa_0, rotr_epu16(&avg_left, &rotrw), 14);
      rowb_3 = _mm_alignr_epi8(rowb_3, rowb_2, 14);
      rowb_2 = _mm_alignr_epi8(rowb_2, rowb_1, 14);
      rowb_1 = _mm_alignr_epi8(rowb_1, rowb_0, 14);
      rowb_0 = _mm_alignr_epi8(rowb_0, rotr_epu16(&avg_left, &rotrw), 14);
    }
  }
}

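// D135: down-right diagonal prediction. Each row shifts the previous one
// right by one pixel, prepending 3-tap filtered left-column pixels, so the
// 3-tap filtered above row drifts one step further right on every line.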
void vpx_highbd_d135_predictor_8x8_ssse3(uint16_t *dst, ptrdiff_t stride,
                                         const uint16_t *above,
                                         const uint16_t *left, int bd) {
  const __m128i rotrw = _mm_load_si128((const __m128i *)rotate_right_epu16);
  const __m128i XABCDEFG = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i ABCDEFGH = _mm_load_si128((const __m128i *)above);
  const __m128i BCDEFGH0 = _mm_srli_si128(ABCDEFGH, 2);
  const __m128i IJKLMNOP = _mm_load_si128((const __m128i *)left);
  const __m128i XIJKLMNO =
      _mm_alignr_epi8(IJKLMNOP, _mm_slli_si128(XABCDEFG, 14), 14);
  const __m128i AXIJKLMN =
      _mm_alignr_epi8(XIJKLMNO, _mm_slli_si128(ABCDEFGH, 14), 14);
  const __m128i avg3 = avg3_epu16(&XABCDEFG, &ABCDEFGH, &BCDEFGH0);
  __m128i avg3_left = avg3_epu16(&IJKLMNOP, &XIJKLMNO, &AXIJKLMN);
  __m128i rowa = avg3;
  int i;
  (void)bd;
  for (i = 0; i < 8; ++i) {
    rowa = _mm_alignr_epi8(rowa, rotr_epu16(&avg3_left, &rotrw), 14);
    _mm_store_si128((__m128i *)dst, rowa);
    dst += stride;
  }
}

void vpx_highbd_d135_predictor_16x16_ssse3(uint16_t *dst, ptrdiff_t stride,
                                           const uint16_t *above,
                                           const uint16_t *left, int bd) {
  const __m128i rotrw = _mm_load_si128((const __m128i *)rotate_right_epu16);
  const __m128i A0 = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i B0 = _mm_load_si128((const __m128i *)above);
  const __m128i A1 = _mm_loadu_si128((const __m128i *)(above + 7));
  const __m128i B1 = _mm_load_si128((const __m128i *)(above + 8));
  const __m128i L0 = _mm_load_si128((const __m128i *)left);
  const __m128i L1 = _mm_load_si128((const __m128i *)(left + 8));
  const __m128i C0 = _mm_alignr_epi8(B1, B0, 2);
  const __m128i C1 = _mm_srli_si128(B1, 2);
  const __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  const __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  const __m128i XL0 = _mm_alignr_epi8(L0, _mm_slli_si128(A0, 14), 14);
  const __m128i XL1 = _mm_alignr_epi8(L1, L0, 14);
  const __m128i L0_ = _mm_alignr_epi8(XL0, _mm_slli_si128(B0, 14), 14);
  const __m128i L1_ = _mm_alignr_epi8(XL1, XL0, 14);
  __m128i rowa_0 = avg3_0;
  __m128i rowa_1 = avg3_1;
  __m128i avg3_left[2];
  int i, j;
  (void)bd;
  avg3_left[0] = avg3_epu16(&L0, &XL0, &L0_);
  avg3_left[1] = avg3_epu16(&L1, &XL1, &L1_);
  for (i = 0; i < 2; ++i) {
    __m128i avg_left = avg3_left[i];
    for (j = 0; j < 8; ++j) {
      rowa_1 = _mm_alignr_epi8(rowa_1, rowa_0, 14);
      rowa_0 = _mm_alignr_epi8(rowa_0, rotr_epu16(&avg_left, &rotrw), 14);
      _mm_store_si128((__m128i *)dst, rowa_0);
      _mm_store_si128((__m128i *)(dst + 8), rowa_1);
      dst += stride;
    }
  }
}

void vpx_highbd_d135_predictor_32x32_ssse3(uint16_t *dst, ptrdiff_t stride,
                                           const uint16_t *above,
                                           const uint16_t *left, int bd) {
  const __m128i rotrw = _mm_load_si128((const __m128i *)rotate_right_epu16);
  const __m128i A0 = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i A1 = _mm_loadu_si128((const __m128i *)(above + 7));
  const __m128i A2 = _mm_loadu_si128((const __m128i *)(above + 15));
  const __m128i A3 = _mm_loadu_si128((const __m128i *)(above + 23));
  const __m128i B0 = _mm_load_si128((const __m128i *)above);
  const __m128i B1 = _mm_load_si128((const __m128i *)(above + 8));
  const __m128i B2 = _mm_load_si128((const __m128i *)(above + 16));
  const __m128i B3 = _mm_load_si128((const __m128i *)(above + 24));
  const __m128i L0 = _mm_load_si128((const __m128i *)left);
  const __m128i L1 = _mm_load_si128((const __m128i *)(left + 8));
  const __m128i L2 = _mm_load_si128((const __m128i *)(left + 16));
  const __m128i L3 = _mm_load_si128((const __m128i *)(left + 24));
  const __m128i C0 = _mm_alignr_epi8(B1, B0, 2);
  const __m128i C1 = _mm_alignr_epi8(B2, B1, 2);
  const __m128i C2 = _mm_alignr_epi8(B3, B2, 2);
  const __m128i C3 = _mm_srli_si128(B3, 2);
  const __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  const __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  const __m128i avg3_2 = avg3_epu16(&A2, &B2, &C2);
  const __m128i avg3_3 = avg3_epu16(&A3, &B3, &C3);
  const __m128i XL0 = _mm_alignr_epi8(L0, _mm_slli_si128(A0, 14), 14);
  const __m128i XL1 = _mm_alignr_epi8(L1, L0, 14);
  const __m128i XL2 = _mm_alignr_epi8(L2, L1, 14);
  const __m128i XL3 = _mm_alignr_epi8(L3, L2, 14);
  const __m128i L0_ = _mm_alignr_epi8(XL0, _mm_slli_si128(B0, 14), 14);
  const __m128i L1_ = _mm_alignr_epi8(XL1, XL0, 14);
  const __m128i L2_ = _mm_alignr_epi8(XL2, XL1, 14);
  const __m128i L3_ = _mm_alignr_epi8(XL3, XL2, 14);
  __m128i rowa_0 = avg3_0;
  __m128i rowa_1 = avg3_1;
  __m128i rowa_2 = avg3_2;
  __m128i rowa_3 = avg3_3;
  __m128i avg3_left[4];
  int i, j;
  (void)bd;
  avg3_left[0] = avg3_epu16(&L0, &XL0, &L0_);
  avg3_left[1] = avg3_epu16(&L1, &XL1, &L1_);
  avg3_left[2] = avg3_epu16(&L2, &XL2, &L2_);
  avg3_left[3] = avg3_epu16(&L3, &XL3, &L3_);
  for (i = 0; i < 4; ++i) {
    __m128i avg_left = avg3_left[i];
    for (j = 0; j < 8; ++j) {
      rowa_3 = _mm_alignr_epi8(rowa_3, rowa_2, 14);
      rowa_2 = _mm_alignr_epi8(rowa_2, rowa_1, 14);
      rowa_1 = _mm_alignr_epi8(rowa_1, rowa_0, 14);
      rowa_0 = _mm_alignr_epi8(rowa_0, rotr_epu16(&avg_left, &rotrw), 14);
      _mm_store_si128((__m128i *)dst, rowa_0);
      _mm_store_si128((__m128i *)(dst + 8), rowa_1);
      _mm_store_si128((__m128i *)(dst + 16), rowa_2);
      _mm_store_si128((__m128i *)(dst + 24), rowa_3);
      dst += stride;
    }
  }
}

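// D153: prediction direction between the 135-degree diagonal and
// horizontal. 2-tap and 3-tap averages of the left column are interleaved,
// and each row prepends one avg2/avg3 pair, shifting the previous row right
// by two pixels.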
void vpx_highbd_d153_predictor_8x8_ssse3(uint16_t *dst, ptrdiff_t stride,
                                         const uint16_t *above,
                                         const uint16_t *left, int bd) {
  const __m128i XABCDEFG = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i ABCDEFG0 = _mm_srli_si128(XABCDEFG, 2);
  const __m128i BCDEFG00 = _mm_srli_si128(XABCDEFG, 4);
  const __m128i avg3 = avg3_epu16(&BCDEFG00, &ABCDEFG0, &XABCDEFG);
  const __m128i IJKLMNOP = _mm_load_si128((const __m128i *)left);
  const __m128i XIJKLMNO =
      _mm_alignr_epi8(IJKLMNOP, _mm_slli_si128(XABCDEFG, 14), 14);
  const __m128i AXIJKLMN =
      _mm_alignr_epi8(XIJKLMNO, _mm_slli_si128(XABCDEFG, 12), 14);
  const __m128i avg3_left = avg3_epu16(&IJKLMNOP, &XIJKLMNO, &AXIJKLMN);
  const __m128i avg2_left = _mm_avg_epu16(IJKLMNOP, XIJKLMNO);
  const __m128i avg2_avg3_lo = _mm_unpacklo_epi16(avg2_left, avg3_left);
  const __m128i avg2_avg3_hi = _mm_unpackhi_epi16(avg2_left, avg3_left);
  const __m128i row0 =
      _mm_alignr_epi8(avg3, _mm_slli_si128(avg2_avg3_lo, 12), 12);
  const __m128i row1 =
      _mm_alignr_epi8(row0, _mm_slli_si128(avg2_avg3_lo, 8), 12);
  const __m128i row2 =
      _mm_alignr_epi8(row1, _mm_slli_si128(avg2_avg3_lo, 4), 12);
  const __m128i row3 = _mm_alignr_epi8(row2, avg2_avg3_lo, 12);
  const __m128i row4 =
      _mm_alignr_epi8(row3, _mm_slli_si128(avg2_avg3_hi, 12), 12);
  const __m128i row5 =
      _mm_alignr_epi8(row4, _mm_slli_si128(avg2_avg3_hi, 8), 12);
  const __m128i row6 =
      _mm_alignr_epi8(row5, _mm_slli_si128(avg2_avg3_hi, 4), 12);
  const __m128i row7 = _mm_alignr_epi8(row6, avg2_avg3_hi, 12);
  (void)bd;
  _mm_store_si128((__m128i *)dst, row0);
  dst += stride;
  _mm_store_si128((__m128i *)dst, row1);
  dst += stride;
  _mm_store_si128((__m128i *)dst, row2);
  dst += stride;
  _mm_store_si128((__m128i *)dst, row3);
  dst += stride;
  _mm_store_si128((__m128i *)dst, row4);
  dst += stride;
  _mm_store_si128((__m128i *)dst, row5);
  dst += stride;
  _mm_store_si128((__m128i *)dst, row6);
  dst += stride;
  _mm_store_si128((__m128i *)dst, row7);
}

void vpx_highbd_d153_predictor_16x16_ssse3(uint16_t *dst, ptrdiff_t stride,
                                           const uint16_t *above,
                                           const uint16_t *left, int bd) {
  const __m128i A0 = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i A1 = _mm_loadu_si128((const __m128i *)(above + 7));
  const __m128i B0 = _mm_alignr_epi8(A1, A0, 2);
  const __m128i B1 = _mm_srli_si128(A1, 2);
  const __m128i C0 = _mm_alignr_epi8(A1, A0, 4);
  const __m128i C1 = _mm_srli_si128(A1, 4);
  const __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  const __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  const __m128i L0 = _mm_load_si128((const __m128i *)left);
  const __m128i L1 = _mm_load_si128((const __m128i *)(left + 8));
  const __m128i XL0 = _mm_alignr_epi8(L0, _mm_slli_si128(A0, 14), 14);
  const __m128i AXL0 = _mm_alignr_epi8(XL0, _mm_slli_si128(A0, 12), 14);
  const __m128i XL1 = _mm_alignr_epi8(L1, L0, 14);
  const __m128i AXL1 = _mm_alignr_epi8(L1, L0, 12);
  const __m128i avg3_left_0 = avg3_epu16(&L0, &XL0, &AXL0);
  const __m128i avg2_left_0 = _mm_avg_epu16(L0, XL0);
  const __m128i avg3_left_1 = avg3_epu16(&L1, &XL1, &AXL1);
  const __m128i avg2_left_1 = _mm_avg_epu16(L1, XL1);
  __m128i row_0 = avg3_0;
  __m128i row_1 = avg3_1;
  __m128i avg2_avg3_left[2][2];
  int i, j;
  (void)bd;
  avg2_avg3_left[0][0] = _mm_unpacklo_epi16(avg2_left_0, avg3_left_0);
  avg2_avg3_left[0][1] = _mm_unpackhi_epi16(avg2_left_0, avg3_left_0);
  avg2_avg3_left[1][0] = _mm_unpacklo_epi16(avg2_left_1, avg3_left_1);
  avg2_avg3_left[1][1] = _mm_unpackhi_epi16(avg2_left_1, avg3_left_1);
  for (j = 0; j < 2; ++j) {
    for (i = 0; i < 2; ++i) {
      const __m128i avg2_avg3 = avg2_avg3_left[j][i];
      row_1 = _mm_alignr_epi8(row_1, row_0, 12);
      row_0 = _mm_alignr_epi8(row_0, _mm_slli_si128(avg2_avg3, 12), 12);
      _mm_store_si128((__m128i *)dst, row_0);
      _mm_store_si128((__m128i *)(dst + 8), row_1);
      dst += stride;
      row_1 = _mm_alignr_epi8(row_1, row_0, 12);
      row_0 = _mm_alignr_epi8(row_0, _mm_slli_si128(avg2_avg3, 8), 12);
      _mm_store_si128((__m128i *)dst, row_0);
      _mm_store_si128((__m128i *)(dst + 8), row_1);
      dst += stride;
      row_1 = _mm_alignr_epi8(row_1, row_0, 12);
      row_0 = _mm_alignr_epi8(row_0, _mm_slli_si128(avg2_avg3, 4), 12);
      _mm_store_si128((__m128i *)dst, row_0);
      _mm_store_si128((__m128i *)(dst + 8), row_1);
      dst += stride;
      row_1 = _mm_alignr_epi8(row_1, row_0, 12);
      row_0 = _mm_alignr_epi8(row_0, avg2_avg3, 12);
      _mm_store_si128((__m128i *)dst, row_0);
      _mm_store_si128((__m128i *)(dst + 8), row_1);
      dst += stride;
    }
  }
}

void vpx_highbd_d153_predictor_32x32_ssse3(uint16_t *dst, ptrdiff_t stride,
                                           const uint16_t *above,
                                           const uint16_t *left, int bd) {
  const __m128i A0 = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i A1 = _mm_loadu_si128((const __m128i *)(above + 7));
  const __m128i A2 = _mm_loadu_si128((const __m128i *)(above + 15));
  const __m128i A3 = _mm_loadu_si128((const __m128i *)(above + 23));
  const __m128i B0 = _mm_alignr_epi8(A1, A0, 2);
  const __m128i B1 = _mm_alignr_epi8(A2, A1, 2);
  const __m128i B2 = _mm_alignr_epi8(A3, A2, 2);
  const __m128i B3 = _mm_srli_si128(A3, 2);
  const __m128i C0 = _mm_alignr_epi8(A1, A0, 4);
  const __m128i C1 = _mm_alignr_epi8(A2, A1, 4);
  const __m128i C2 = _mm_alignr_epi8(A3, A2, 4);
  const __m128i C3 = _mm_srli_si128(A3, 4);
  const __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  const __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  const __m128i avg3_2 = avg3_epu16(&A2, &B2, &C2);
  const __m128i avg3_3 = avg3_epu16(&A3, &B3, &C3);
  const __m128i L0 = _mm_load_si128((const __m128i *)left);
  const __m128i L1 = _mm_load_si128((const __m128i *)(left + 8));
  const __m128i L2 = _mm_load_si128((const __m128i *)(left + 16));
  const __m128i L3 = _mm_load_si128((const __m128i *)(left + 24));
  const __m128i XL0 = _mm_alignr_epi8(L0, _mm_slli_si128(A0, 14), 14);
  const __m128i XL1 = _mm_alignr_epi8(L1, L0, 14);
  const __m128i XL2 = _mm_alignr_epi8(L2, L1, 14);
  const __m128i XL3 = _mm_alignr_epi8(L3, L2, 14);
  const __m128i AXL0 = _mm_alignr_epi8(XL0, _mm_slli_si128(A0, 12), 14);
  const __m128i AXL1 = _mm_alignr_epi8(L1, L0, 12);
  const __m128i AXL2 = _mm_alignr_epi8(L2, L1, 12);
  const __m128i AXL3 = _mm_alignr_epi8(L3, L2, 12);
  const __m128i avg3_left_0 = avg3_epu16(&L0, &XL0, &AXL0);
  const __m128i avg3_left_1 = avg3_epu16(&L1, &XL1, &AXL1);
  const __m128i avg3_left_2 = avg3_epu16(&L2, &XL2, &AXL2);
  const __m128i avg3_left_3 = avg3_epu16(&L3, &XL3, &AXL3);
  const __m128i avg2_left_0 = _mm_avg_epu16(L0, XL0);
  const __m128i avg2_left_1 = _mm_avg_epu16(L1, XL1);
  const __m128i avg2_left_2 = _mm_avg_epu16(L2, XL2);
  const __m128i avg2_left_3 = _mm_avg_epu16(L3, XL3);
  __m128i row_0 = avg3_0;
  __m128i row_1 = avg3_1;
  __m128i row_2 = avg3_2;
  __m128i row_3 = avg3_3;
  __m128i avg2_avg3_left[4][2];
  int i, j;
  (void)bd;
  avg2_avg3_left[0][0] = _mm_unpacklo_epi16(avg2_left_0, avg3_left_0);
  avg2_avg3_left[0][1] = _mm_unpackhi_epi16(avg2_left_0, avg3_left_0);
  avg2_avg3_left[1][0] = _mm_unpacklo_epi16(avg2_left_1, avg3_left_1);
  avg2_avg3_left[1][1] = _mm_unpackhi_epi16(avg2_left_1, avg3_left_1);
  avg2_avg3_left[2][0] = _mm_unpacklo_epi16(avg2_left_2, avg3_left_2);
  avg2_avg3_left[2][1] = _mm_unpackhi_epi16(avg2_left_2, avg3_left_2);
  avg2_avg3_left[3][0] = _mm_unpacklo_epi16(avg2_left_3, avg3_left_3);
  avg2_avg3_left[3][1] = _mm_unpackhi_epi16(avg2_left_3, avg3_left_3);
  for (j = 0; j < 4; ++j) {
    for (i = 0; i < 2; ++i) {
      const __m128i avg2_avg3 = avg2_avg3_left[j][i];
      row_3 = _mm_alignr_epi8(row_3, row_2, 12);
      row_2 = _mm_alignr_epi8(row_2, row_1, 12);
      row_1 = _mm_alignr_epi8(row_1, row_0, 12);
      row_0 = _mm_alignr_epi8(row_0, _mm_slli_si128(avg2_avg3, 12), 12);
      _mm_store_si128((__m128i *)dst, row_0);
      _mm_store_si128((__m128i *)(dst + 8), row_1);
      _mm_store_si128((__m128i *)(dst + 16), row_2);
      _mm_store_si128((__m128i *)(dst + 24), row_3);
      dst += stride;
      row_3 = _mm_alignr_epi8(row_3, row_2, 12);
      row_2 = _mm_alignr_epi8(row_2, row_1, 12);
      row_1 = _mm_alignr_epi8(row_1, row_0, 12);
      row_0 = _mm_alignr_epi8(row_0, _mm_slli_si128(avg2_avg3, 8), 12);
      _mm_store_si128((__m128i *)dst, row_0);
      _mm_store_si128((__m128i *)(dst + 8), row_1);
      _mm_store_si128((__m128i *)(dst + 16), row_2);
      _mm_store_si128((__m128i *)(dst + 24), row_3);
      dst += stride;
      row_3 = _mm_alignr_epi8(row_3, row_2, 12);
      row_2 = _mm_alignr_epi8(row_2, row_1, 12);
      row_1 = _mm_alignr_epi8(row_1, row_0, 12);
      row_0 = _mm_alignr_epi8(row_0, _mm_slli_si128(avg2_avg3, 4), 12);
      _mm_store_si128((__m128i *)dst, row_0);
      _mm_store_si128((__m128i *)(dst + 8), row_1);
      _mm_store_si128((__m128i *)(dst + 16), row_2);
      _mm_store_si128((__m128i *)(dst + 24), row_3);
      dst += stride;
      row_3 = _mm_alignr_epi8(row_3, row_2, 12);
      row_2 = _mm_alignr_epi8(row_2, row_1, 12);
      row_1 = _mm_alignr_epi8(row_1, row_0, 12);
      row_0 = _mm_alignr_epi8(row_0, avg2_avg3, 12);
      _mm_store_si128((__m128i *)dst, row_0);
      _mm_store_si128((__m128i *)(dst + 8), row_1);
      _mm_store_si128((__m128i *)(dst + 16), row_2);
      _mm_store_si128((__m128i *)(dst + 24), row_3);
      dst += stride;
    }
  }
}

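// D207: prediction from the left column only. Interleaved 2-tap/3-tap
// averages of the left pixels form the rows, each row advancing by two
// pixels; once the column is exhausted, the bottom-left pixel is replicated.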
static INLINE void d207_store_4x8(uint16_t **dst, const ptrdiff_t stride,
                                  const __m128i *a, const __m128i *b) {
  _mm_store_si128((__m128i *)*dst, *a);
  *dst += stride;
  _mm_store_si128((__m128i *)*dst, _mm_alignr_epi8(*b, *a, 4));
  *dst += stride;
  _mm_store_si128((__m128i *)*dst, _mm_alignr_epi8(*b, *a, 8));
  *dst += stride;
  _mm_store_si128((__m128i *)*dst, _mm_alignr_epi8(*b, *a, 12));
  *dst += stride;
}

void vpx_highbd_d207_predictor_8x8_ssse3(uint16_t *dst, ptrdiff_t stride,
                                         const uint16_t *above,
                                         const uint16_t *left, int bd) {
  const __m128i ABCDEFGH = _mm_load_si128((const __m128i *)left);
  const __m128i ABCDHHHH = _mm_shufflehi_epi16(ABCDEFGH, 0xff);
  const __m128i HHHHHHHH = _mm_unpackhi_epi64(ABCDHHHH, ABCDHHHH);
  const __m128i BCDEFGHH = _mm_alignr_epi8(HHHHHHHH, ABCDEFGH, 2);
  const __m128i CDEFGHHH = _mm_alignr_epi8(HHHHHHHH, ABCDEFGH, 4);
  const __m128i avg3 = avg3_epu16(&ABCDEFGH, &BCDEFGHH, &CDEFGHHH);
  const __m128i avg2 = _mm_avg_epu16(ABCDEFGH, BCDEFGHH);
  const __m128i out_a = _mm_unpacklo_epi16(avg2, avg3);
  const __m128i out_b = _mm_unpackhi_epi16(avg2, avg3);
  (void)above;
  (void)bd;
  d207_store_4x8(&dst, stride, &out_a, &out_b);
  d207_store_4x8(&dst, stride, &out_b, &HHHHHHHH);
}

static INLINE void d207_store_4x16(uint16_t **dst, const ptrdiff_t stride,
                                   const __m128i *a, const __m128i *b,
                                   const __m128i *c) {
  _mm_store_si128((__m128i *)*dst, *a);
  _mm_store_si128((__m128i *)(*dst + 8), *b);
  *dst += stride;
  _mm_store_si128((__m128i *)*dst, _mm_alignr_epi8(*b, *a, 4));
  _mm_store_si128((__m128i *)(*dst + 8), _mm_alignr_epi8(*c, *b, 4));
  *dst += stride;
  _mm_store_si128((__m128i *)*dst, _mm_alignr_epi8(*b, *a, 8));
  _mm_store_si128((__m128i *)(*dst + 8), _mm_alignr_epi8(*c, *b, 8));
  *dst += stride;
  _mm_store_si128((__m128i *)*dst, _mm_alignr_epi8(*b, *a, 12));
  _mm_store_si128((__m128i *)(*dst + 8), _mm_alignr_epi8(*c, *b, 12));
  *dst += stride;
}

void vpx_highbd_d207_predictor_16x16_ssse3(uint16_t *dst, ptrdiff_t stride,
                                           const uint16_t *above,
                                           const uint16_t *left, int bd) {
  const __m128i A0 = _mm_load_si128((const __m128i *)left);
  const __m128i A1 = _mm_load_si128((const __m128i *)(left + 8));
  const __m128i LR0 = _mm_shufflehi_epi16(A1, 0xff);
  const __m128i LR = _mm_unpackhi_epi64(LR0, LR0);
  const __m128i B0 = _mm_alignr_epi8(A1, A0, 2);
  const __m128i B1 = _mm_alignr_epi8(LR, A1, 2);
  const __m128i C0 = _mm_alignr_epi8(A1, A0, 4);
  const __m128i C1 = _mm_alignr_epi8(LR, A1, 4);
  const __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  const __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  const __m128i avg2_0 = _mm_avg_epu16(A0, B0);
  const __m128i avg2_1 = _mm_avg_epu16(A1, B1);
  const __m128i out_a = _mm_unpacklo_epi16(avg2_0, avg3_0);
  const __m128i out_b = _mm_unpackhi_epi16(avg2_0, avg3_0);
  const __m128i out_c = _mm_unpacklo_epi16(avg2_1, avg3_1);
  const __m128i out_d = _mm_unpackhi_epi16(avg2_1, avg3_1);
  (void)above;
  (void)bd;
  d207_store_4x16(&dst, stride, &out_a, &out_b, &out_c);
  d207_store_4x16(&dst, stride, &out_b, &out_c, &out_d);
  d207_store_4x16(&dst, stride, &out_c, &out_d, &LR);
  d207_store_4x16(&dst, stride, &out_d, &LR, &LR);
}

static INLINE void d207_store_4x32(uint16_t **dst, const ptrdiff_t stride,
                                   const __m128i *a, const __m128i *b,
                                   const __m128i *c, const __m128i *d,
                                   const __m128i *e) {
  _mm_store_si128((__m128i *)*dst, *a);
  _mm_store_si128((__m128i *)(*dst + 8), *b);
  _mm_store_si128((__m128i *)(*dst + 16), *c);
  _mm_store_si128((__m128i *)(*dst + 24), *d);
  *dst += stride;
  _mm_store_si128((__m128i *)*dst, _mm_alignr_epi8(*b, *a, 4));
  _mm_store_si128((__m128i *)(*dst + 8), _mm_alignr_epi8(*c, *b, 4));
  _mm_store_si128((__m128i *)(*dst + 16), _mm_alignr_epi8(*d, *c, 4));
  _mm_store_si128((__m128i *)(*dst + 24), _mm_alignr_epi8(*e, *d, 4));
  *dst += stride;
  _mm_store_si128((__m128i *)*dst, _mm_alignr_epi8(*b, *a, 8));
  _mm_store_si128((__m128i *)(*dst + 8), _mm_alignr_epi8(*c, *b, 8));
  _mm_store_si128((__m128i *)(*dst + 16), _mm_alignr_epi8(*d, *c, 8));
  _mm_store_si128((__m128i *)(*dst + 24), _mm_alignr_epi8(*e, *d, 8));
  *dst += stride;
  _mm_store_si128((__m128i *)*dst, _mm_alignr_epi8(*b, *a, 12));
  _mm_store_si128((__m128i *)(*dst + 8), _mm_alignr_epi8(*c, *b, 12));
  _mm_store_si128((__m128i *)(*dst + 16), _mm_alignr_epi8(*d, *c, 12));
  _mm_store_si128((__m128i *)(*dst + 24), _mm_alignr_epi8(*e, *d, 12));
  *dst += stride;
}

void vpx_highbd_d207_predictor_32x32_ssse3(uint16_t *dst, ptrdiff_t stride,
                                           const uint16_t *above,
                                           const uint16_t *left, int bd) {
  const __m128i A0 = _mm_load_si128((const __m128i *)left);
  const __m128i A1 = _mm_load_si128((const __m128i *)(left + 8));
  const __m128i A2 = _mm_load_si128((const __m128i *)(left + 16));
  const __m128i A3 = _mm_load_si128((const __m128i *)(left + 24));
  const __m128i LR0 = _mm_shufflehi_epi16(A3, 0xff);
  const __m128i LR = _mm_unpackhi_epi64(LR0, LR0);
  const __m128i B0 = _mm_alignr_epi8(A1, A0, 2);
  const __m128i B1 = _mm_alignr_epi8(A2, A1, 2);
  const __m128i B2 = _mm_alignr_epi8(A3, A2, 2);
  const __m128i B3 = _mm_alignr_epi8(LR, A3, 2);
  const __m128i C0 = _mm_alignr_epi8(A1, A0, 4);
  const __m128i C1 = _mm_alignr_epi8(A2, A1, 4);
  const __m128i C2 = _mm_alignr_epi8(A3, A2, 4);
  const __m128i C3 = _mm_alignr_epi8(LR, A3, 4);
  const __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  const __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  const __m128i avg3_2 = avg3_epu16(&A2, &B2, &C2);
  const __m128i avg3_3 = avg3_epu16(&A3, &B3, &C3);
  const __m128i avg2_0 = _mm_avg_epu16(A0, B0);
  const __m128i avg2_1 = _mm_avg_epu16(A1, B1);
  const __m128i avg2_2 = _mm_avg_epu16(A2, B2);
  const __m128i avg2_3 = _mm_avg_epu16(A3, B3);
  const __m128i out_a = _mm_unpacklo_epi16(avg2_0, avg3_0);
  const __m128i out_b = _mm_unpackhi_epi16(avg2_0, avg3_0);
  const __m128i out_c = _mm_unpacklo_epi16(avg2_1, avg3_1);
  const __m128i out_d = _mm_unpackhi_epi16(avg2_1, avg3_1);
  const __m128i out_e = _mm_unpacklo_epi16(avg2_2, avg3_2);
  const __m128i out_f = _mm_unpackhi_epi16(avg2_2, avg3_2);
  const __m128i out_g = _mm_unpacklo_epi16(avg2_3, avg3_3);
  const __m128i out_h = _mm_unpackhi_epi16(avg2_3, avg3_3);
  (void)above;
  (void)bd;
  d207_store_4x32(&dst, stride, &out_a, &out_b, &out_c, &out_d, &out_e);
  d207_store_4x32(&dst, stride, &out_b, &out_c, &out_d, &out_e, &out_f);
  d207_store_4x32(&dst, stride, &out_c, &out_d, &out_e, &out_f, &out_g);
  d207_store_4x32(&dst, stride, &out_d, &out_e, &out_f, &out_g, &out_h);
  d207_store_4x32(&dst, stride, &out_e, &out_f, &out_g, &out_h, &LR);
  d207_store_4x32(&dst, stride, &out_f, &out_g, &out_h, &LR, &LR);
  d207_store_4x32(&dst, stride, &out_g, &out_h, &LR, &LR, &LR);
  d207_store_4x32(&dst, stride, &out_h, &LR, &LR, &LR, &LR);
}

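// D63: prediction direction between vertical and the 45-degree diagonal,
// using only the above row. Even rows hold 2-tap averages and odd rows
// 3-tap averages; both advance by one pixel every two rows, with the
// rightmost above pixel replicated past the end of the row.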
static INLINE void d63_store_4x8(uint16_t **dst, const ptrdiff_t stride,
                                 __m128i *a, __m128i *b, const __m128i *ar) {
  _mm_store_si128((__m128i *)*dst, *a);
  *dst += stride;
  _mm_store_si128((__m128i *)*dst, *b);
  *dst += stride;
  *a = _mm_alignr_epi8(*ar, *a, 2);
  *b = _mm_alignr_epi8(*ar, *b, 2);
  _mm_store_si128((__m128i *)*dst, *a);
  *dst += stride;
  _mm_store_si128((__m128i *)*dst, *b);
  *dst += stride;
  *a = _mm_alignr_epi8(*ar, *a, 2);
  *b = _mm_alignr_epi8(*ar, *b, 2);
}

void vpx_highbd_d63_predictor_8x8_ssse3(uint16_t *dst, ptrdiff_t stride,
                                        const uint16_t *above,
                                        const uint16_t *left, int bd) {
  const __m128i ABCDEFGH = _mm_load_si128((const __m128i *)above);
  const __m128i ABCDHHHH = _mm_shufflehi_epi16(ABCDEFGH, 0xff);
  const __m128i HHHHHHHH = _mm_unpackhi_epi64(ABCDHHHH, ABCDHHHH);
  const __m128i BCDEFGHH = _mm_alignr_epi8(HHHHHHHH, ABCDEFGH, 2);
  const __m128i CDEFGHHH = _mm_alignr_epi8(HHHHHHHH, ABCDEFGH, 4);
  __m128i avg3 = avg3_epu16(&ABCDEFGH, &BCDEFGHH, &CDEFGHHH);
  __m128i avg2 = _mm_avg_epu16(ABCDEFGH, BCDEFGHH);
  (void)left;
  (void)bd;
  d63_store_4x8(&dst, stride, &avg2, &avg3, &HHHHHHHH);
  d63_store_4x8(&dst, stride, &avg2, &avg3, &HHHHHHHH);
}

void vpx_highbd_d63_predictor_16x16_ssse3(uint16_t *dst, ptrdiff_t stride,
                                          const uint16_t *above,
                                          const uint16_t *left, int bd) {
  const __m128i A0 = _mm_load_si128((const __m128i *)above);
  const __m128i A1 = _mm_load_si128((const __m128i *)(above + 8));
  const __m128i AR0 = _mm_shufflehi_epi16(A1, 0xff);
  const __m128i AR = _mm_unpackhi_epi64(AR0, AR0);
  const __m128i B0 = _mm_alignr_epi8(A1, A0, 2);
  const __m128i B1 = _mm_alignr_epi8(AR, A1, 2);
  const __m128i C0 = _mm_alignr_epi8(A1, A0, 4);
  const __m128i C1 = _mm_alignr_epi8(AR, A1, 4);
  __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  __m128i avg2_0 = _mm_avg_epu16(A0, B0);
  __m128i avg2_1 = _mm_avg_epu16(A1, B1);
  int i;
  (void)left;
  (void)bd;
  for (i = 0; i < 14; i += 2) {
    _mm_store_si128((__m128i *)dst, avg2_0);
    _mm_store_si128((__m128i *)(dst + 8), avg2_1);
    dst += stride;
    _mm_store_si128((__m128i *)dst, avg3_0);
    _mm_store_si128((__m128i *)(dst + 8), avg3_1);
    dst += stride;
    avg2_0 = _mm_alignr_epi8(avg2_1, avg2_0, 2);
    avg2_1 = _mm_alignr_epi8(AR, avg2_1, 2);
    avg3_0 = _mm_alignr_epi8(avg3_1, avg3_0, 2);
    avg3_1 = _mm_alignr_epi8(AR, avg3_1, 2);
  }
  _mm_store_si128((__m128i *)dst, avg2_0);
  _mm_store_si128((__m128i *)(dst + 8), avg2_1);
  dst += stride;
  _mm_store_si128((__m128i *)dst, avg3_0);
  _mm_store_si128((__m128i *)(dst + 8), avg3_1);
}

void vpx_highbd_d63_predictor_32x32_ssse3(uint16_t *dst, ptrdiff_t stride,
                                          const uint16_t *above,
                                          const uint16_t *left, int bd) {
  const __m128i A0 = _mm_load_si128((const __m128i *)above);
  const __m128i A1 = _mm_load_si128((const __m128i *)(above + 8));
  const __m128i A2 = _mm_load_si128((const __m128i *)(above + 16));
  const __m128i A3 = _mm_load_si128((const __m128i *)(above + 24));
  const __m128i AR0 = _mm_shufflehi_epi16(A3, 0xff);
  const __m128i AR = _mm_unpackhi_epi64(AR0, AR0);
  const __m128i B0 = _mm_alignr_epi8(A1, A0, 2);
  const __m128i B1 = _mm_alignr_epi8(A2, A1, 2);
  const __m128i B2 = _mm_alignr_epi8(A3, A2, 2);
  const __m128i B3 = _mm_alignr_epi8(AR, A3, 2);
  const __m128i C0 = _mm_alignr_epi8(A1, A0, 4);
  const __m128i C1 = _mm_alignr_epi8(A2, A1, 4);
  const __m128i C2 = _mm_alignr_epi8(A3, A2, 4);
  const __m128i C3 = _mm_alignr_epi8(AR, A3, 4);
  __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  __m128i avg3_2 = avg3_epu16(&A2, &B2, &C2);
  __m128i avg3_3 = avg3_epu16(&A3, &B3, &C3);
  __m128i avg2_0 = _mm_avg_epu16(A0, B0);
  __m128i avg2_1 = _mm_avg_epu16(A1, B1);
  __m128i avg2_2 = _mm_avg_epu16(A2, B2);
  __m128i avg2_3 = _mm_avg_epu16(A3, B3);
  int i;
  (void)left;
  (void)bd;
  for (i = 0; i < 30; i += 2) {
    _mm_store_si128((__m128i *)dst, avg2_0);
    _mm_store_si128((__m128i *)(dst + 8), avg2_1);
    _mm_store_si128((__m128i *)(dst + 16), avg2_2);
    _mm_store_si128((__m128i *)(dst + 24), avg2_3);
    dst += stride;
    _mm_store_si128((__m128i *)dst, avg3_0);
    _mm_store_si128((__m128i *)(dst + 8), avg3_1);
    _mm_store_si128((__m128i *)(dst + 16), avg3_2);
    _mm_store_si128((__m128i *)(dst + 24), avg3_3);
    dst += stride;
    avg2_0 = _mm_alignr_epi8(avg2_1, avg2_0, 2);
    avg2_1 = _mm_alignr_epi8(avg2_2, avg2_1, 2);
    avg2_2 = _mm_alignr_epi8(avg2_3, avg2_2, 2);
    avg2_3 = _mm_alignr_epi8(AR, avg2_3, 2);
    avg3_0 = _mm_alignr_epi8(avg3_1, avg3_0, 2);
    avg3_1 = _mm_alignr_epi8(avg3_2, avg3_1, 2);
    avg3_2 = _mm_alignr_epi8(avg3_3, avg3_2, 2);
    avg3_3 = _mm_alignr_epi8(AR, avg3_3, 2);
  }
  _mm_store_si128((__m128i *)dst, avg2_0);
  _mm_store_si128((__m128i *)(dst + 8), avg2_1);
  _mm_store_si128((__m128i *)(dst + 16), avg2_2);
  _mm_store_si128((__m128i *)(dst + 24), avg2_3);
  dst += stride;
  _mm_store_si128((__m128i *)dst, avg3_0);
  _mm_store_si128((__m128i *)(dst + 8), avg3_1);
  _mm_store_si128((__m128i *)(dst + 16), avg3_2);
  _mm_store_si128((__m128i *)(dst + 24), avg3_3);
}