highbd_intrapred_neon.c

/*
 *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"

//------------------------------------------------------------------------------
// DC 4x4
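
// The DC predictors fill a block with one value: the rounded average of the
// `above` and/or `left` edge pixels (or 1 << (bd - 1) when neither edge is
// used). The dc_sum_* helpers reduce an edge to that sum with pairwise adds:
// each vpadd_u16 halves the lane count, so two passes fold four lanes into
// one.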
static INLINE uint16x4_t dc_sum_4(const uint16_t *ref) {
  const uint16x4_t ref_u16 = vld1_u16(ref);
  const uint16x4_t p0 = vpadd_u16(ref_u16, ref_u16);
  return vpadd_u16(p0, p0);
}

static INLINE void dc_store_4x4(uint16_t *dst, ptrdiff_t stride,
                                const uint16x4_t dc) {
  const uint16x4_t dc_dup = vdup_lane_u16(dc, 0);
  int i;
  for (i = 0; i < 4; ++i, dst += stride) {
    vst1_u16(dst, dc_dup);
  }
}

void vpx_highbd_dc_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,
                                      const uint16_t *above,
                                      const uint16_t *left, int bd) {
  const uint16x4_t a = vld1_u16(above);
  const uint16x4_t l = vld1_u16(left);
  uint16x4_t sum;
  uint16x4_t dc;
  (void)bd;
  sum = vadd_u16(a, l);
  sum = vpadd_u16(sum, sum);
  sum = vpadd_u16(sum, sum);
  dc = vrshr_n_u16(sum, 3);
  dc_store_4x4(dst, stride, dc);
}
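
// For reference, the predictor above is equivalent to this scalar sketch
// (illustrative only, not part of the library):
//
//   uint32_t sum = 0;
//   for (i = 0; i < 4; ++i) sum += above[i] + left[i];
//   dc = (uint16_t)((sum + 4) >> 3);  // the vrshr_n_u16(sum, 3) above
//   for (r = 0; r < 4; ++r)
//     for (c = 0; c < 4; ++c) dst[r * stride + c] = dc;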
void vpx_highbd_dc_left_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,
                                           const uint16_t *above,
                                           const uint16_t *left, int bd) {
  const uint16x4_t sum = dc_sum_4(left);
  const uint16x4_t dc = vrshr_n_u16(sum, 2);
  (void)above;
  (void)bd;
  dc_store_4x4(dst, stride, dc);
}

void vpx_highbd_dc_top_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,
                                          const uint16_t *above,
                                          const uint16_t *left, int bd) {
  const uint16x4_t sum = dc_sum_4(above);
  const uint16x4_t dc = vrshr_n_u16(sum, 2);
  (void)left;
  (void)bd;
  dc_store_4x4(dst, stride, dc);
}

void vpx_highbd_dc_128_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,
                                          const uint16_t *above,
                                          const uint16_t *left, int bd) {
  const uint16x4_t dc = vdup_n_u16(1 << (bd - 1));
  (void)above;
  (void)left;
  dc_store_4x4(dst, stride, dc);
}

//------------------------------------------------------------------------------
// DC 8x8
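
// Same pattern over wider vectors: the full 8x8 DC averages 16 edge pixels
// (rounded shift by 4); the left/top-only variants average 8 (shift by 3).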
static INLINE uint16x4_t dc_sum_8(const uint16_t *ref) {
  const uint16x8_t ref_u16 = vld1q_u16(ref);
  uint16x4_t sum = vadd_u16(vget_low_u16(ref_u16), vget_high_u16(ref_u16));
  sum = vpadd_u16(sum, sum);
  return vpadd_u16(sum, sum);
}

static INLINE void dc_store_8x8(uint16_t *dst, ptrdiff_t stride,
                                const uint16x4_t dc) {
  const uint16x8_t dc_dup = vdupq_lane_u16(dc, 0);
  int i;
  for (i = 0; i < 8; ++i, dst += stride) {
    vst1q_u16(dst, dc_dup);
  }
}

void vpx_highbd_dc_predictor_8x8_neon(uint16_t *dst, ptrdiff_t stride,
                                      const uint16_t *above,
                                      const uint16_t *left, int bd) {
  const uint16x8_t above_u16 = vld1q_u16(above);
  const uint16x8_t left_u16 = vld1q_u16(left);
  const uint16x8_t p0 = vaddq_u16(above_u16, left_u16);
  uint16x4_t sum = vadd_u16(vget_low_u16(p0), vget_high_u16(p0));
  uint16x4_t dc;
  (void)bd;
  sum = vpadd_u16(sum, sum);
  sum = vpadd_u16(sum, sum);
  dc = vrshr_n_u16(sum, 4);
  dc_store_8x8(dst, stride, dc);
}

void vpx_highbd_dc_left_predictor_8x8_neon(uint16_t *dst, ptrdiff_t stride,
                                           const uint16_t *above,
                                           const uint16_t *left, int bd) {
  const uint16x4_t sum = dc_sum_8(left);
  const uint16x4_t dc = vrshr_n_u16(sum, 3);
  (void)above;
  (void)bd;
  dc_store_8x8(dst, stride, dc);
}

void vpx_highbd_dc_top_predictor_8x8_neon(uint16_t *dst, ptrdiff_t stride,
                                          const uint16_t *above,
                                          const uint16_t *left, int bd) {
  const uint16x4_t sum = dc_sum_8(above);
  const uint16x4_t dc = vrshr_n_u16(sum, 3);
  (void)left;
  (void)bd;
  dc_store_8x8(dst, stride, dc);
}

void vpx_highbd_dc_128_predictor_8x8_neon(uint16_t *dst, ptrdiff_t stride,
                                          const uint16_t *above,
                                          const uint16_t *left, int bd) {
  const uint16x4_t dc = vdup_n_u16(1 << (bd - 1));
  (void)above;
  (void)left;
  dc_store_8x8(dst, stride, dc);
}

//------------------------------------------------------------------------------
// DC 16x16
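
// From 16x16 up an edge no longer fits in one register, so vld2q_u16 loads
// 16 pixels as two de-interleaved vectors; the interleaving order does not
// matter since the lanes are only summed. Note the full predictor sums 32
// pixels, which can exceed 16 bits at 12-bit depth, so it widens with
// vpaddl_u16 before the rounded shift by 5.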
static INLINE uint16x4_t dc_sum_16(const uint16_t *ref) {
  const uint16x8x2_t ref_u16 = vld2q_u16(ref);
  const uint16x8_t p0 = vaddq_u16(ref_u16.val[0], ref_u16.val[1]);
  uint16x4_t sum = vadd_u16(vget_low_u16(p0), vget_high_u16(p0));
  sum = vpadd_u16(sum, sum);
  return vpadd_u16(sum, sum);
}

static INLINE void dc_store_16x16(uint16_t *dst, ptrdiff_t stride,
                                  const uint16x4_t dc) {
  uint16x8x2_t dc_dup;
  int i;
  dc_dup.val[0] = dc_dup.val[1] = vdupq_lane_u16(dc, 0);
  for (i = 0; i < 16; ++i, dst += stride) {
    vst2q_u16(dst, dc_dup);
  }
}

void vpx_highbd_dc_predictor_16x16_neon(uint16_t *dst, ptrdiff_t stride,
                                        const uint16_t *above,
                                        const uint16_t *left, int bd) {
  const uint16x8x2_t a = vld2q_u16(above);
  const uint16x8x2_t l = vld2q_u16(left);
  const uint16x8_t pa = vaddq_u16(a.val[0], a.val[1]);
  const uint16x8_t pl = vaddq_u16(l.val[0], l.val[1]);
  const uint16x8_t pal0 = vaddq_u16(pa, pl);
  uint16x4_t pal1 = vadd_u16(vget_low_u16(pal0), vget_high_u16(pal0));
  uint32x2_t sum;
  uint16x4_t dc;
  (void)bd;
  pal1 = vpadd_u16(pal1, pal1);
  sum = vpaddl_u16(pal1);
  dc = vreinterpret_u16_u32(vrshr_n_u32(sum, 5));
  dc_store_16x16(dst, stride, dc);
}

void vpx_highbd_dc_left_predictor_16x16_neon(uint16_t *dst, ptrdiff_t stride,
                                             const uint16_t *above,
                                             const uint16_t *left, int bd) {
  const uint16x4_t sum = dc_sum_16(left);
  const uint16x4_t dc = vrshr_n_u16(sum, 4);
  (void)above;
  (void)bd;
  dc_store_16x16(dst, stride, dc);
}

void vpx_highbd_dc_top_predictor_16x16_neon(uint16_t *dst, ptrdiff_t stride,
                                            const uint16_t *above,
                                            const uint16_t *left, int bd) {
  const uint16x4_t sum = dc_sum_16(above);
  const uint16x4_t dc = vrshr_n_u16(sum, 4);
  (void)left;
  (void)bd;
  dc_store_16x16(dst, stride, dc);
}

void vpx_highbd_dc_128_predictor_16x16_neon(uint16_t *dst, ptrdiff_t stride,
                                            const uint16_t *above,
                                            const uint16_t *left, int bd) {
  const uint16x4_t dc = vdup_n_u16(1 << (bd - 1));
  (void)above;
  (void)left;
  dc_store_16x16(dst, stride, dc);
}

//------------------------------------------------------------------------------
// DC 32x32
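
// 32x32 loads each edge as four de-interleaved vectors with vld4q_u16 and
// always reduces through vpaddl_u16: 64 pixels at 12-bit depth sum to nearly
// 1 << 18, well past uint16_t range.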
static INLINE uint32x2_t dc_sum_32(const uint16_t *ref) {
  const uint16x8x4_t r = vld4q_u16(ref);
  const uint16x8_t p0 = vaddq_u16(r.val[0], r.val[1]);
  const uint16x8_t p1 = vaddq_u16(r.val[2], r.val[3]);
  const uint16x8_t p2 = vaddq_u16(p0, p1);
  uint16x4_t sum = vadd_u16(vget_low_u16(p2), vget_high_u16(p2));
  sum = vpadd_u16(sum, sum);
  return vpaddl_u16(sum);
}

static INLINE void dc_store_32x32(uint16_t *dst, ptrdiff_t stride,
                                  const uint16x4_t dc) {
  uint16x8x2_t dc_dup;
  int i;
  dc_dup.val[0] = dc_dup.val[1] = vdupq_lane_u16(dc, 0);
  for (i = 0; i < 32; ++i) {
    vst2q_u16(dst, dc_dup);
    dst += 16;
    vst2q_u16(dst, dc_dup);
    dst += stride - 16;
  }
}

void vpx_highbd_dc_predictor_32x32_neon(uint16_t *dst, ptrdiff_t stride,
                                        const uint16_t *above,
                                        const uint16_t *left, int bd) {
  const uint16x8x4_t a = vld4q_u16(above);
  const uint16x8x4_t l = vld4q_u16(left);
  const uint16x8_t pa0 = vaddq_u16(a.val[0], a.val[1]);
  const uint16x8_t pa1 = vaddq_u16(a.val[2], a.val[3]);
  const uint16x8_t pl0 = vaddq_u16(l.val[0], l.val[1]);
  const uint16x8_t pl1 = vaddq_u16(l.val[2], l.val[3]);
  const uint16x8_t pa = vaddq_u16(pa0, pa1);
  const uint16x8_t pl = vaddq_u16(pl0, pl1);
  const uint16x8_t pal0 = vaddq_u16(pa, pl);
  const uint16x4_t pal1 = vadd_u16(vget_low_u16(pal0), vget_high_u16(pal0));
  uint32x2_t sum = vpaddl_u16(pal1);
  uint16x4_t dc;
  (void)bd;
  sum = vpadd_u32(sum, sum);
  dc = vreinterpret_u16_u32(vrshr_n_u32(sum, 6));
  dc_store_32x32(dst, stride, dc);
}

void vpx_highbd_dc_left_predictor_32x32_neon(uint16_t *dst, ptrdiff_t stride,
                                             const uint16_t *above,
                                             const uint16_t *left, int bd) {
  const uint32x2_t sum = dc_sum_32(left);
  const uint16x4_t dc = vreinterpret_u16_u32(vrshr_n_u32(sum, 5));
  (void)above;
  (void)bd;
  dc_store_32x32(dst, stride, dc);
}

void vpx_highbd_dc_top_predictor_32x32_neon(uint16_t *dst, ptrdiff_t stride,
                                            const uint16_t *above,
                                            const uint16_t *left, int bd) {
  const uint32x2_t sum = dc_sum_32(above);
  const uint16x4_t dc = vreinterpret_u16_u32(vrshr_n_u32(sum, 5));
  (void)left;
  (void)bd;
  dc_store_32x32(dst, stride, dc);
}

void vpx_highbd_dc_128_predictor_32x32_neon(uint16_t *dst, ptrdiff_t stride,
                                            const uint16_t *above,
                                            const uint16_t *left, int bd) {
  const uint16x4_t dc = vdup_n_u16(1 << (bd - 1));
  (void)above;
  (void)left;
  dc_store_32x32(dst, stride, dc);
}

// -----------------------------------------------------------------------------
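
// D45: each output pixel is the 3-tap filter (a + 2 * b + c + 2) >> 2 over
// consecutive above pixels, computed as vrhadd(vhadd(a, c), b); a short case
// analysis shows the rounding of the outer halving makes this bit-exact with
// the 3-tap form. Row r is the filtered above row shifted left r lanes, so
// the prediction is constant along diagonals running toward the lower left.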
void vpx_highbd_d45_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,
                                       const uint16_t *above,
                                       const uint16_t *left, int bd) {
  const uint16x8_t ABCDEFGH = vld1q_u16(above);
  const uint16x8_t BCDEFGH0 = vld1q_u16(above + 1);
  const uint16x8_t CDEFGH00 = vld1q_u16(above + 2);
  const uint16x8_t avg1 = vhaddq_u16(ABCDEFGH, CDEFGH00);
  const uint16x8_t avg2 = vrhaddq_u16(avg1, BCDEFGH0);
  const uint16x4_t avg2_low = vget_low_u16(avg2);
  const uint16x4_t avg2_high = vget_high_u16(avg2);
  const uint16x4_t r1 = vext_u16(avg2_low, avg2_high, 1);
  const uint16x4_t r2 = vext_u16(avg2_low, avg2_high, 2);
  const uint16x4_t r3 = vext_u16(avg2_low, avg2_high, 3);
  (void)left;
  (void)bd;
  vst1_u16(dst, avg2_low);
  dst += stride;
  vst1_u16(dst, r1);
  dst += stride;
  vst1_u16(dst, r2);
  dst += stride;
  vst1_u16(dst, r3);
  vst1q_lane_u16(dst + 3, ABCDEFGH, 7);
}
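
// For the larger d45 blocks only the first row is filtered directly; each
// following row is the previous one shifted left a lane with vextq_u16 and
// refilled from the replicated above-right pixel, which the bottom rows
// eventually consist of entirely.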
static INLINE void d45_store_8(uint16_t **dst, const ptrdiff_t stride,
                               const uint16x8_t above_right, uint16x8_t *row) {
  *row = vextq_u16(*row, above_right, 1);
  vst1q_u16(*dst, *row);
  *dst += stride;
}

void vpx_highbd_d45_predictor_8x8_neon(uint16_t *dst, ptrdiff_t stride,
                                       const uint16_t *above,
                                       const uint16_t *left, int bd) {
  const uint16x8_t A0 = vld1q_u16(above);
  const uint16x8_t above_right = vdupq_lane_u16(vget_high_u16(A0), 3);
  const uint16x8_t A1 = vld1q_u16(above + 1);
  const uint16x8_t A2 = vld1q_u16(above + 2);
  const uint16x8_t avg1 = vhaddq_u16(A0, A2);
  uint16x8_t row = vrhaddq_u16(avg1, A1);
  (void)left;
  (void)bd;
  vst1q_u16(dst, row);
  dst += stride;
  d45_store_8(&dst, stride, above_right, &row);
  d45_store_8(&dst, stride, above_right, &row);
  d45_store_8(&dst, stride, above_right, &row);
  d45_store_8(&dst, stride, above_right, &row);
  d45_store_8(&dst, stride, above_right, &row);
  d45_store_8(&dst, stride, above_right, &row);
  vst1q_u16(dst, above_right);
}

static INLINE void d45_store_16(uint16_t **dst, const ptrdiff_t stride,
                                const uint16x8_t above_right, uint16x8_t *row_0,
                                uint16x8_t *row_1) {
  *row_0 = vextq_u16(*row_0, *row_1, 1);
  *row_1 = vextq_u16(*row_1, above_right, 1);
  vst1q_u16(*dst, *row_0);
  *dst += 8;
  vst1q_u16(*dst, *row_1);
  *dst += stride - 8;
}

void vpx_highbd_d45_predictor_16x16_neon(uint16_t *dst, ptrdiff_t stride,
                                         const uint16_t *above,
                                         const uint16_t *left, int bd) {
  const uint16x8_t A0_0 = vld1q_u16(above);
  const uint16x8_t A0_1 = vld1q_u16(above + 8);
  const uint16x8_t above_right = vdupq_lane_u16(vget_high_u16(A0_1), 3);
  const uint16x8_t A1_0 = vld1q_u16(above + 1);
  const uint16x8_t A1_1 = vld1q_u16(above + 9);
  const uint16x8_t A2_0 = vld1q_u16(above + 2);
  const uint16x8_t A2_1 = vld1q_u16(above + 10);
  const uint16x8_t avg_0 = vhaddq_u16(A0_0, A2_0);
  const uint16x8_t avg_1 = vhaddq_u16(A0_1, A2_1);
  uint16x8_t row_0 = vrhaddq_u16(avg_0, A1_0);
  uint16x8_t row_1 = vrhaddq_u16(avg_1, A1_1);
  (void)left;
  (void)bd;
  vst1q_u16(dst, row_0);
  vst1q_u16(dst + 8, row_1);
  dst += stride;
  d45_store_16(&dst, stride, above_right, &row_0, &row_1);
  d45_store_16(&dst, stride, above_right, &row_0, &row_1);
  d45_store_16(&dst, stride, above_right, &row_0, &row_1);
  d45_store_16(&dst, stride, above_right, &row_0, &row_1);
  d45_store_16(&dst, stride, above_right, &row_0, &row_1);
  d45_store_16(&dst, stride, above_right, &row_0, &row_1);
  d45_store_16(&dst, stride, above_right, &row_0, &row_1);
  d45_store_16(&dst, stride, above_right, &row_0, &row_1);
  d45_store_16(&dst, stride, above_right, &row_0, &row_1);
  d45_store_16(&dst, stride, above_right, &row_0, &row_1);
  d45_store_16(&dst, stride, above_right, &row_0, &row_1);
  d45_store_16(&dst, stride, above_right, &row_0, &row_1);
  d45_store_16(&dst, stride, above_right, &row_0, &row_1);
  d45_store_16(&dst, stride, above_right, &row_0, &row_1);
  vst1q_u16(dst, above_right);
  vst1q_u16(dst + 8, above_right);
}

void vpx_highbd_d45_predictor_32x32_neon(uint16_t *dst, ptrdiff_t stride,
                                         const uint16_t *above,
                                         const uint16_t *left, int bd) {
  const uint16x8_t A0_0 = vld1q_u16(above);
  const uint16x8_t A0_1 = vld1q_u16(above + 8);
  const uint16x8_t A0_2 = vld1q_u16(above + 16);
  const uint16x8_t A0_3 = vld1q_u16(above + 24);
  const uint16x8_t above_right = vdupq_lane_u16(vget_high_u16(A0_3), 3);
  const uint16x8_t A1_0 = vld1q_u16(above + 1);
  const uint16x8_t A1_1 = vld1q_u16(above + 9);
  const uint16x8_t A1_2 = vld1q_u16(above + 17);
  const uint16x8_t A1_3 = vld1q_u16(above + 25);
  const uint16x8_t A2_0 = vld1q_u16(above + 2);
  const uint16x8_t A2_1 = vld1q_u16(above + 10);
  const uint16x8_t A2_2 = vld1q_u16(above + 18);
  const uint16x8_t A2_3 = vld1q_u16(above + 26);
  const uint16x8_t avg_0 = vhaddq_u16(A0_0, A2_0);
  const uint16x8_t avg_1 = vhaddq_u16(A0_1, A2_1);
  const uint16x8_t avg_2 = vhaddq_u16(A0_2, A2_2);
  const uint16x8_t avg_3 = vhaddq_u16(A0_3, A2_3);
  uint16x8_t row_0 = vrhaddq_u16(avg_0, A1_0);
  uint16x8_t row_1 = vrhaddq_u16(avg_1, A1_1);
  uint16x8_t row_2 = vrhaddq_u16(avg_2, A1_2);
  uint16x8_t row_3 = vrhaddq_u16(avg_3, A1_3);
  int i;
  (void)left;
  (void)bd;
  vst1q_u16(dst, row_0);
  dst += 8;
  vst1q_u16(dst, row_1);
  dst += 8;
  vst1q_u16(dst, row_2);
  dst += 8;
  vst1q_u16(dst, row_3);
  dst += stride - 24;
  for (i = 0; i < 30; ++i) {
    row_0 = vextq_u16(row_0, row_1, 1);
    row_1 = vextq_u16(row_1, row_2, 1);
    row_2 = vextq_u16(row_2, row_3, 1);
    row_3 = vextq_u16(row_3, above_right, 1);
    vst1q_u16(dst, row_0);
    dst += 8;
    vst1q_u16(dst, row_1);
    dst += 8;
    vst1q_u16(dst, row_2);
    dst += 8;
    vst1q_u16(dst, row_3);
    dst += stride - 24;
  }
  vst1q_u16(dst, above_right);
  dst += 8;
  vst1q_u16(dst, above_right);
  dst += 8;
  vst1q_u16(dst, above_right);
  dst += 8;
  vst1q_u16(dst, above_right);
}

// -----------------------------------------------------------------------------
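
// D135: the same 3-tap filter is applied along a combined edge built from
// the reversed left column, the top-left pixel, and the above row; each row
// down shifts the output window one lane back toward the left column, so
// values repeat along the down-right diagonal.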
void vpx_highbd_d135_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,
                                        const uint16_t *above,
                                        const uint16_t *left, int bd) {
  const uint16x8_t XA0123___ = vld1q_u16(above - 1);
  const uint16x4_t L0123 = vld1_u16(left);
  const uint16x4_t L3210 = vrev64_u16(L0123);
  const uint16x8_t L____3210 = vcombine_u16(L0123, L3210);
  const uint16x8_t L3210XA012 = vcombine_u16(L3210, vget_low_u16(XA0123___));
  const uint16x8_t L210XA0123 = vextq_u16(L____3210, XA0123___, 5);
  const uint16x8_t L10XA0123_ = vextq_u16(L____3210, XA0123___, 6);
  const uint16x8_t avg1 = vhaddq_u16(L3210XA012, L10XA0123_);
  const uint16x8_t avg2 = vrhaddq_u16(avg1, L210XA0123);
  const uint16x4_t row_0 = vget_low_u16(avg2);
  const uint16x4_t row_1 = vget_high_u16(avg2);
  const uint16x4_t r0 = vext_u16(row_0, row_1, 3);
  const uint16x4_t r1 = vext_u16(row_0, row_1, 2);
  const uint16x4_t r2 = vext_u16(row_0, row_1, 1);
  (void)bd;
  vst1_u16(dst, r0);
  dst += stride;
  vst1_u16(dst, r1);
  dst += stride;
  vst1_u16(dst, r2);
  dst += stride;
  vst1_u16(dst, row_0);
}

void vpx_highbd_d135_predictor_8x8_neon(uint16_t *dst, ptrdiff_t stride,
                                        const uint16_t *above,
                                        const uint16_t *left, int bd) {
  const uint16x8_t XA0123456 = vld1q_u16(above - 1);
  const uint16x8_t A01234567 = vld1q_u16(above);
  const uint16x8_t A1234567_ = vld1q_u16(above + 1);
  const uint16x8_t L01234567 = vld1q_u16(left);
  const uint16x4_t L3210 = vrev64_u16(vget_low_u16(L01234567));
  const uint16x4_t L7654 = vrev64_u16(vget_high_u16(L01234567));
  const uint16x8_t L76543210 = vcombine_u16(L7654, L3210);
  const uint16x8_t L6543210X = vextq_u16(L76543210, XA0123456, 1);
  const uint16x8_t L543210XA0 = vextq_u16(L76543210, XA0123456, 2);
  const uint16x8_t avg_0 = vhaddq_u16(L76543210, L543210XA0);
  const uint16x8_t avg_1 = vhaddq_u16(XA0123456, A1234567_);
  const uint16x8_t row_0 = vrhaddq_u16(avg_0, L6543210X);
  const uint16x8_t row_1 = vrhaddq_u16(avg_1, A01234567);
  const uint16x8_t r0 = vextq_u16(row_0, row_1, 7);
  const uint16x8_t r1 = vextq_u16(row_0, row_1, 6);
  const uint16x8_t r2 = vextq_u16(row_0, row_1, 5);
  const uint16x8_t r3 = vextq_u16(row_0, row_1, 4);
  const uint16x8_t r4 = vextq_u16(row_0, row_1, 3);
  const uint16x8_t r5 = vextq_u16(row_0, row_1, 2);
  const uint16x8_t r6 = vextq_u16(row_0, row_1, 1);
  (void)bd;
  vst1q_u16(dst, r0);
  dst += stride;
  vst1q_u16(dst, r1);
  dst += stride;
  vst1q_u16(dst, r2);
  dst += stride;
  vst1q_u16(dst, r3);
  dst += stride;
  vst1q_u16(dst, r4);
  dst += stride;
  vst1q_u16(dst, r5);
  dst += stride;
  vst1q_u16(dst, r6);
  dst += stride;
  vst1q_u16(dst, row_0);
}

static INLINE void d135_store_16(uint16_t **dst, const ptrdiff_t stride,
                                 const uint16x8_t row_0,
                                 const uint16x8_t row_1) {
  vst1q_u16(*dst, row_0);
  *dst += 8;
  vst1q_u16(*dst, row_1);
  *dst += stride - 8;
}

void vpx_highbd_d135_predictor_16x16_neon(uint16_t *dst, ptrdiff_t stride,
                                          const uint16_t *above,
                                          const uint16_t *left, int bd) {
  const uint16x8_t L01234567 = vld1q_u16(left);
  const uint16x8_t L89abcdef = vld1q_u16(left + 8);
  const uint16x4_t L3210 = vrev64_u16(vget_low_u16(L01234567));
  const uint16x4_t L7654 = vrev64_u16(vget_high_u16(L01234567));
  const uint16x4_t Lba98 = vrev64_u16(vget_low_u16(L89abcdef));
  const uint16x4_t Lfedc = vrev64_u16(vget_high_u16(L89abcdef));
  const uint16x8_t L76543210 = vcombine_u16(L7654, L3210);
  const uint16x8_t Lfedcba98 = vcombine_u16(Lfedc, Lba98);
  const uint16x8_t Ledcba987 = vextq_u16(Lfedcba98, L76543210, 1);
  const uint16x8_t Ldcba9876 = vextq_u16(Lfedcba98, L76543210, 2);
  const uint16x8_t avg_0 = vhaddq_u16(Lfedcba98, Ldcba9876);
  const uint16x8_t row_0 = vrhaddq_u16(avg_0, Ledcba987);
  const uint16x8_t XA0123456 = vld1q_u16(above - 1);
  const uint16x8_t L6543210X = vextq_u16(L76543210, XA0123456, 1);
  const uint16x8_t L543210XA0 = vextq_u16(L76543210, XA0123456, 2);
  const uint16x8_t avg_1 = vhaddq_u16(L76543210, L543210XA0);
  const uint16x8_t row_1 = vrhaddq_u16(avg_1, L6543210X);
  const uint16x8_t A01234567 = vld1q_u16(above);
  const uint16x8_t A12345678 = vld1q_u16(above + 1);
  const uint16x8_t avg_2 = vhaddq_u16(XA0123456, A12345678);
  const uint16x8_t row_2 = vrhaddq_u16(avg_2, A01234567);
  const uint16x8_t A789abcde = vld1q_u16(above + 7);
  const uint16x8_t A89abcdef = vld1q_u16(above + 8);
  const uint16x8_t A9abcdef_ = vld1q_u16(above + 9);
  const uint16x8_t avg_3 = vhaddq_u16(A789abcde, A9abcdef_);
  const uint16x8_t row_3 = vrhaddq_u16(avg_3, A89abcdef);
  const uint16x8_t r0_0 = vextq_u16(row_1, row_2, 7);
  const uint16x8_t r0_1 = vextq_u16(row_2, row_3, 7);
  const uint16x8_t r1_0 = vextq_u16(row_1, row_2, 6);
  const uint16x8_t r1_1 = vextq_u16(row_2, row_3, 6);
  const uint16x8_t r2_0 = vextq_u16(row_1, row_2, 5);
  const uint16x8_t r2_1 = vextq_u16(row_2, row_3, 5);
  const uint16x8_t r3_0 = vextq_u16(row_1, row_2, 4);
  const uint16x8_t r3_1 = vextq_u16(row_2, row_3, 4);
  const uint16x8_t r4_0 = vextq_u16(row_1, row_2, 3);
  const uint16x8_t r4_1 = vextq_u16(row_2, row_3, 3);
  const uint16x8_t r5_0 = vextq_u16(row_1, row_2, 2);
  const uint16x8_t r5_1 = vextq_u16(row_2, row_3, 2);
  const uint16x8_t r6_0 = vextq_u16(row_1, row_2, 1);
  const uint16x8_t r6_1 = vextq_u16(row_2, row_3, 1);
  const uint16x8_t r8_0 = vextq_u16(row_0, row_1, 7);
  const uint16x8_t r9_0 = vextq_u16(row_0, row_1, 6);
  const uint16x8_t ra_0 = vextq_u16(row_0, row_1, 5);
  const uint16x8_t rb_0 = vextq_u16(row_0, row_1, 4);
  const uint16x8_t rc_0 = vextq_u16(row_0, row_1, 3);
  const uint16x8_t rd_0 = vextq_u16(row_0, row_1, 2);
  const uint16x8_t re_0 = vextq_u16(row_0, row_1, 1);
  (void)bd;
  d135_store_16(&dst, stride, r0_0, r0_1);
  d135_store_16(&dst, stride, r1_0, r1_1);
  d135_store_16(&dst, stride, r2_0, r2_1);
  d135_store_16(&dst, stride, r3_0, r3_1);
  d135_store_16(&dst, stride, r4_0, r4_1);
  d135_store_16(&dst, stride, r5_0, r5_1);
  d135_store_16(&dst, stride, r6_0, r6_1);
  d135_store_16(&dst, stride, row_1, row_2);
  d135_store_16(&dst, stride, r8_0, r0_0);
  d135_store_16(&dst, stride, r9_0, r1_0);
  d135_store_16(&dst, stride, ra_0, r2_0);
  d135_store_16(&dst, stride, rb_0, r3_0);
  d135_store_16(&dst, stride, rc_0, r4_0);
  d135_store_16(&dst, stride, rd_0, r5_0);
  d135_store_16(&dst, stride, re_0, r6_0);
  vst1q_u16(dst, row_0);
  dst += 8;
  vst1q_u16(dst, row_1);
}

void vpx_highbd_d135_predictor_32x32_neon(uint16_t *dst, ptrdiff_t stride,
                                          const uint16_t *above,
                                          const uint16_t *left, int bd) {
  const uint16x8_t LL01234567 = vld1q_u16(left + 16);
  const uint16x8_t LL89abcdef = vld1q_u16(left + 24);
  const uint16x4_t LL3210 = vrev64_u16(vget_low_u16(LL01234567));
  const uint16x4_t LL7654 = vrev64_u16(vget_high_u16(LL01234567));
  const uint16x4_t LLba98 = vrev64_u16(vget_low_u16(LL89abcdef));
  const uint16x4_t LLfedc = vrev64_u16(vget_high_u16(LL89abcdef));
  const uint16x8_t LL76543210 = vcombine_u16(LL7654, LL3210);
  const uint16x8_t LLfedcba98 = vcombine_u16(LLfedc, LLba98);
  const uint16x8_t LLedcba987 = vextq_u16(LLfedcba98, LL76543210, 1);
  const uint16x8_t LLdcba9876 = vextq_u16(LLfedcba98, LL76543210, 2);
  const uint16x8_t avg_0 = vhaddq_u16(LLfedcba98, LLdcba9876);
  uint16x8_t row_0 = vrhaddq_u16(avg_0, LLedcba987);
  const uint16x8_t LU01234567 = vld1q_u16(left);
  const uint16x8_t LU89abcdef = vld1q_u16(left + 8);
  const uint16x4_t LU3210 = vrev64_u16(vget_low_u16(LU01234567));
  const uint16x4_t LU7654 = vrev64_u16(vget_high_u16(LU01234567));
  const uint16x4_t LUba98 = vrev64_u16(vget_low_u16(LU89abcdef));
  const uint16x4_t LUfedc = vrev64_u16(vget_high_u16(LU89abcdef));
  const uint16x8_t LU76543210 = vcombine_u16(LU7654, LU3210);
  const uint16x8_t LUfedcba98 = vcombine_u16(LUfedc, LUba98);
  const uint16x8_t LL6543210Uf = vextq_u16(LL76543210, LUfedcba98, 1);
  const uint16x8_t LL543210Ufe = vextq_u16(LL76543210, LUfedcba98, 2);
  const uint16x8_t avg_1 = vhaddq_u16(LL76543210, LL543210Ufe);
  uint16x8_t row_1 = vrhaddq_u16(avg_1, LL6543210Uf);
  const uint16x8_t LUedcba987 = vextq_u16(LUfedcba98, LU76543210, 1);
  const uint16x8_t LUdcba9876 = vextq_u16(LUfedcba98, LU76543210, 2);
  const uint16x8_t avg_2 = vhaddq_u16(LUfedcba98, LUdcba9876);
  uint16x8_t row_2 = vrhaddq_u16(avg_2, LUedcba987);
  const uint16x8_t XAL0123456 = vld1q_u16(above - 1);
  const uint16x8_t LU6543210X = vextq_u16(LU76543210, XAL0123456, 1);
  const uint16x8_t LU543210XA0 = vextq_u16(LU76543210, XAL0123456, 2);
  const uint16x8_t avg_3 = vhaddq_u16(LU76543210, LU543210XA0);
  uint16x8_t row_3 = vrhaddq_u16(avg_3, LU6543210X);
  const uint16x8_t AL01234567 = vld1q_u16(above);
  const uint16x8_t AL12345678 = vld1q_u16(above + 1);
  const uint16x8_t avg_4 = vhaddq_u16(XAL0123456, AL12345678);
  uint16x8_t row_4 = vrhaddq_u16(avg_4, AL01234567);
  const uint16x8_t AL789abcde = vld1q_u16(above + 7);
  const uint16x8_t AL89abcdef = vld1q_u16(above + 8);
  const uint16x8_t AL9abcdefg = vld1q_u16(above + 9);
  const uint16x8_t avg_5 = vhaddq_u16(AL789abcde, AL9abcdefg);
  uint16x8_t row_5 = vrhaddq_u16(avg_5, AL89abcdef);
  const uint16x8_t ALfR0123456 = vld1q_u16(above + 15);
  const uint16x8_t AR01234567 = vld1q_u16(above + 16);
  const uint16x8_t AR12345678 = vld1q_u16(above + 17);
  const uint16x8_t avg_6 = vhaddq_u16(ALfR0123456, AR12345678);
  uint16x8_t row_6 = vrhaddq_u16(avg_6, AR01234567);
  const uint16x8_t AR789abcde = vld1q_u16(above + 23);
  const uint16x8_t AR89abcdef = vld1q_u16(above + 24);
  const uint16x8_t AR9abcdef_ = vld1q_u16(above + 25);
  const uint16x8_t avg_7 = vhaddq_u16(AR789abcde, AR9abcdef_);
  uint16x8_t row_7 = vrhaddq_u16(avg_7, AR89abcdef);
  int i, j;
  (void)bd;
  dst += 31 * stride;
  for (i = 0; i < 4; ++i) {
    for (j = 0; j < 8; ++j) {
      vst1q_u16(dst, row_0);
      dst += 8;
      vst1q_u16(dst, row_1);
      dst += 8;
      vst1q_u16(dst, row_2);
      dst += 8;
      vst1q_u16(dst, row_3);
      dst -= stride + 24;
      row_0 = vextq_u16(row_0, row_1, 1);
      row_1 = vextq_u16(row_1, row_2, 1);
      row_2 = vextq_u16(row_2, row_3, 1);
      row_3 = vextq_u16(row_3, row_4, 1);
      row_4 = vextq_u16(row_4, row_4, 1);
    }
    row_4 = row_5;
    row_5 = row_6;
    row_6 = row_7;
  }
}

//------------------------------------------------------------------------------
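
// V: pure vertical copy of the above row, loaded once and held in registers
// for the whole block. The 16x16 and 32x32 variants pair vld2q_u16 with
// vst2q_u16, so the store re-interleaves exactly what the load split and the
// pixel order is preserved.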
void vpx_highbd_v_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,
                                     const uint16_t *above,
                                     const uint16_t *left, int bd) {
  const uint16x4_t row = vld1_u16(above);
  int i;
  (void)left;
  (void)bd;
  for (i = 0; i < 4; i++, dst += stride) {
    vst1_u16(dst, row);
  }
}

void vpx_highbd_v_predictor_8x8_neon(uint16_t *dst, ptrdiff_t stride,
                                     const uint16_t *above,
                                     const uint16_t *left, int bd) {
  const uint16x8_t row = vld1q_u16(above);
  int i;
  (void)left;
  (void)bd;
  for (i = 0; i < 8; i++, dst += stride) {
    vst1q_u16(dst, row);
  }
}

void vpx_highbd_v_predictor_16x16_neon(uint16_t *dst, ptrdiff_t stride,
                                       const uint16_t *above,
                                       const uint16_t *left, int bd) {
  const uint16x8x2_t row = vld2q_u16(above);
  int i;
  (void)left;
  (void)bd;
  for (i = 0; i < 16; i++, dst += stride) {
    vst2q_u16(dst, row);
  }
}

void vpx_highbd_v_predictor_32x32_neon(uint16_t *dst, ptrdiff_t stride,
                                       const uint16_t *above,
                                       const uint16_t *left, int bd) {
  const uint16x8x2_t row0 = vld2q_u16(above);
  const uint16x8x2_t row1 = vld2q_u16(above + 16);
  int i;
  (void)left;
  (void)bd;
  for (i = 0; i < 32; i++) {
    vst2q_u16(dst, row0);
    dst += 16;
    vst2q_u16(dst, row1);
    dst += stride - 16;
  }
}

// -----------------------------------------------------------------------------
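
// H: row r is left[r] broadcast across the block with vdup(q)_lane_u16. The
// 16x16 and 32x32 variants consume `left` eight pixels at a time, one lane
// duplicated per row.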
void vpx_highbd_h_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,
                                     const uint16_t *above,
                                     const uint16_t *left, int bd) {
  const uint16x4_t left_u16 = vld1_u16(left);
  uint16x4_t row;
  (void)above;
  (void)bd;
  row = vdup_lane_u16(left_u16, 0);
  vst1_u16(dst, row);
  dst += stride;
  row = vdup_lane_u16(left_u16, 1);
  vst1_u16(dst, row);
  dst += stride;
  row = vdup_lane_u16(left_u16, 2);
  vst1_u16(dst, row);
  dst += stride;
  row = vdup_lane_u16(left_u16, 3);
  vst1_u16(dst, row);
}

void vpx_highbd_h_predictor_8x8_neon(uint16_t *dst, ptrdiff_t stride,
                                     const uint16_t *above,
                                     const uint16_t *left, int bd) {
  const uint16x8_t left_u16 = vld1q_u16(left);
  const uint16x4_t left_low = vget_low_u16(left_u16);
  const uint16x4_t left_high = vget_high_u16(left_u16);
  uint16x8_t row;
  (void)above;
  (void)bd;
  row = vdupq_lane_u16(left_low, 0);
  vst1q_u16(dst, row);
  dst += stride;
  row = vdupq_lane_u16(left_low, 1);
  vst1q_u16(dst, row);
  dst += stride;
  row = vdupq_lane_u16(left_low, 2);
  vst1q_u16(dst, row);
  dst += stride;
  row = vdupq_lane_u16(left_low, 3);
  vst1q_u16(dst, row);
  dst += stride;
  row = vdupq_lane_u16(left_high, 0);
  vst1q_u16(dst, row);
  dst += stride;
  row = vdupq_lane_u16(left_high, 1);
  vst1q_u16(dst, row);
  dst += stride;
  row = vdupq_lane_u16(left_high, 2);
  vst1q_u16(dst, row);
  dst += stride;
  row = vdupq_lane_u16(left_high, 3);
  vst1q_u16(dst, row);
}

static INLINE void h_store_16(uint16_t **dst, const ptrdiff_t stride,
                              const uint16x8_t row) {
  // Note: vst1q is faster than vst2q
  vst1q_u16(*dst, row);
  *dst += 8;
  vst1q_u16(*dst, row);
  *dst += stride - 8;
}

void vpx_highbd_h_predictor_16x16_neon(uint16_t *dst, ptrdiff_t stride,
                                       const uint16_t *above,
                                       const uint16_t *left, int bd) {
  int i;
  (void)above;
  (void)bd;
  for (i = 0; i < 2; i++, left += 8) {
    const uint16x8_t left_u16q = vld1q_u16(left);
    const uint16x4_t left_low = vget_low_u16(left_u16q);
    const uint16x4_t left_high = vget_high_u16(left_u16q);
    uint16x8_t row;
    row = vdupq_lane_u16(left_low, 0);
    h_store_16(&dst, stride, row);
    row = vdupq_lane_u16(left_low, 1);
    h_store_16(&dst, stride, row);
    row = vdupq_lane_u16(left_low, 2);
    h_store_16(&dst, stride, row);
    row = vdupq_lane_u16(left_low, 3);
    h_store_16(&dst, stride, row);
    row = vdupq_lane_u16(left_high, 0);
    h_store_16(&dst, stride, row);
    row = vdupq_lane_u16(left_high, 1);
    h_store_16(&dst, stride, row);
    row = vdupq_lane_u16(left_high, 2);
    h_store_16(&dst, stride, row);
    row = vdupq_lane_u16(left_high, 3);
    h_store_16(&dst, stride, row);
  }
}

static INLINE void h_store_32(uint16_t **dst, const ptrdiff_t stride,
                              const uint16x8_t row) {
  // Note: vst1q is faster than vst2q
  vst1q_u16(*dst, row);
  *dst += 8;
  vst1q_u16(*dst, row);
  *dst += 8;
  vst1q_u16(*dst, row);
  *dst += 8;
  vst1q_u16(*dst, row);
  *dst += stride - 24;
}

void vpx_highbd_h_predictor_32x32_neon(uint16_t *dst, ptrdiff_t stride,
                                       const uint16_t *above,
                                       const uint16_t *left, int bd) {
  int i;
  (void)above;
  (void)bd;
  for (i = 0; i < 4; i++, left += 8) {
    const uint16x8_t left_u16q = vld1q_u16(left);
    const uint16x4_t left_low = vget_low_u16(left_u16q);
    const uint16x4_t left_high = vget_high_u16(left_u16q);
    uint16x8_t row;
    row = vdupq_lane_u16(left_low, 0);
    h_store_32(&dst, stride, row);
    row = vdupq_lane_u16(left_low, 1);
    h_store_32(&dst, stride, row);
    row = vdupq_lane_u16(left_low, 2);
    h_store_32(&dst, stride, row);
    row = vdupq_lane_u16(left_low, 3);
    h_store_32(&dst, stride, row);
    row = vdupq_lane_u16(left_high, 0);
    h_store_32(&dst, stride, row);
    row = vdupq_lane_u16(left_high, 1);
    h_store_32(&dst, stride, row);
    row = vdupq_lane_u16(left_high, 2);
    h_store_32(&dst, stride, row);
    row = vdupq_lane_u16(left_high, 3);
    h_store_32(&dst, stride, row);
  }
}

// -----------------------------------------------------------------------------
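
// TM (TrueMotion): pred(r, c) = left[r] + above[c] - above[-1], clipped to
// [0, (1 << bd) - 1]. The sums are formed in int16 (safe up to 12-bit
// input), clamped above with vminq_s16 against the max, and clamped below by
// vqshluq_n_s16(sum, 0), whose signed-to-unsigned saturation maps negative
// sums to zero.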
void vpx_highbd_tm_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,
                                      const uint16_t *above,
                                      const uint16_t *left, int bd) {
  const int16x8_t max = vmovq_n_s16((1 << bd) - 1);
  const int16x8_t top_left = vld1q_dup_s16((const int16_t *)(above - 1));
  const int16x4_t above_s16d = vld1_s16((const int16_t *)above);
  const int16x8_t above_s16 = vcombine_s16(above_s16d, above_s16d);
  const int16x4_t left_s16 = vld1_s16((const int16_t *)left);
  const int16x8_t sub = vsubq_s16(above_s16, top_left);
  int16x8_t sum;
  uint16x8_t row;

  sum = vcombine_s16(vdup_lane_s16(left_s16, 0), vdup_lane_s16(left_s16, 1));
  sum = vaddq_s16(sum, sub);
  sum = vminq_s16(sum, max);
  row = vqshluq_n_s16(sum, 0);
  vst1_u16(dst, vget_low_u16(row));
  dst += stride;
  vst1_u16(dst, vget_high_u16(row));
  dst += stride;

  sum = vcombine_s16(vdup_lane_s16(left_s16, 2), vdup_lane_s16(left_s16, 3));
  sum = vaddq_s16(sum, sub);
  sum = vminq_s16(sum, max);
  row = vqshluq_n_s16(sum, 0);
  vst1_u16(dst, vget_low_u16(row));
  dst += stride;
  vst1_u16(dst, vget_high_u16(row));
}

static INLINE void tm_8_kernel(uint16_t **dst, const ptrdiff_t stride,
                               const int16x8_t left_dup, const int16x8_t sub,
                               const int16x8_t max) {
  uint16x8_t row;
  int16x8_t sum = vaddq_s16(left_dup, sub);
  sum = vminq_s16(sum, max);
  row = vqshluq_n_s16(sum, 0);
  vst1q_u16(*dst, row);
  *dst += stride;
}

void vpx_highbd_tm_predictor_8x8_neon(uint16_t *dst, ptrdiff_t stride,
                                      const uint16_t *above,
                                      const uint16_t *left, int bd) {
  const int16x8_t max = vmovq_n_s16((1 << bd) - 1);
  const int16x8_t top_left = vld1q_dup_s16((const int16_t *)(above - 1));
  const int16x8_t above_s16 = vld1q_s16((const int16_t *)above);
  const int16x8_t left_s16 = vld1q_s16((const int16_t *)left);
  const int16x8_t sub = vsubq_s16(above_s16, top_left);
  int16x4_t left_s16d;
  int16x8_t left_dup;
  int i;

  left_s16d = vget_low_s16(left_s16);
  for (i = 0; i < 2; i++, left_s16d = vget_high_s16(left_s16)) {
    left_dup = vdupq_lane_s16(left_s16d, 0);
    tm_8_kernel(&dst, stride, left_dup, sub, max);
    left_dup = vdupq_lane_s16(left_s16d, 1);
    tm_8_kernel(&dst, stride, left_dup, sub, max);
    left_dup = vdupq_lane_s16(left_s16d, 2);
    tm_8_kernel(&dst, stride, left_dup, sub, max);
    left_dup = vdupq_lane_s16(left_s16d, 3);
    tm_8_kernel(&dst, stride, left_dup, sub, max);
  }
}

static INLINE void tm_16_kernel(uint16_t **dst, const ptrdiff_t stride,
                                const int16x8_t left_dup, const int16x8_t sub0,
                                const int16x8_t sub1, const int16x8_t max) {
  uint16x8_t row0, row1;
  int16x8_t sum0 = vaddq_s16(left_dup, sub0);
  int16x8_t sum1 = vaddq_s16(left_dup, sub1);
  sum0 = vminq_s16(sum0, max);
  sum1 = vminq_s16(sum1, max);
  row0 = vqshluq_n_s16(sum0, 0);
  row1 = vqshluq_n_s16(sum1, 0);
  vst1q_u16(*dst, row0);
  *dst += 8;
  vst1q_u16(*dst, row1);
  *dst += stride - 8;
}

void vpx_highbd_tm_predictor_16x16_neon(uint16_t *dst, ptrdiff_t stride,
                                        const uint16_t *above,
                                        const uint16_t *left, int bd) {
  const int16x8_t max = vmovq_n_s16((1 << bd) - 1);
  const int16x8_t top_left = vld1q_dup_s16((const int16_t *)(above - 1));
  const int16x8_t above0 = vld1q_s16((const int16_t *)above);
  const int16x8_t above1 = vld1q_s16((const int16_t *)(above + 8));
  const int16x8_t sub0 = vsubq_s16(above0, top_left);
  const int16x8_t sub1 = vsubq_s16(above1, top_left);
  int16x8_t left_dup;
  int i, j;

  for (j = 0; j < 2; j++, left += 8) {
    const int16x8_t left_s16q = vld1q_s16((const int16_t *)left);
    int16x4_t left_s16d = vget_low_s16(left_s16q);
    for (i = 0; i < 2; i++, left_s16d = vget_high_s16(left_s16q)) {
      left_dup = vdupq_lane_s16(left_s16d, 0);
      tm_16_kernel(&dst, stride, left_dup, sub0, sub1, max);
      left_dup = vdupq_lane_s16(left_s16d, 1);
      tm_16_kernel(&dst, stride, left_dup, sub0, sub1, max);
      left_dup = vdupq_lane_s16(left_s16d, 2);
      tm_16_kernel(&dst, stride, left_dup, sub0, sub1, max);
      left_dup = vdupq_lane_s16(left_s16d, 3);
      tm_16_kernel(&dst, stride, left_dup, sub0, sub1, max);
    }
  }
}

static INLINE void tm_32_kernel(uint16_t **dst, const ptrdiff_t stride,
                                const int16x8_t left_dup, const int16x8_t sub0,
                                const int16x8_t sub1, const int16x8_t sub2,
                                const int16x8_t sub3, const int16x8_t max) {
  uint16x8_t row0, row1, row2, row3;
  int16x8_t sum0 = vaddq_s16(left_dup, sub0);
  int16x8_t sum1 = vaddq_s16(left_dup, sub1);
  int16x8_t sum2 = vaddq_s16(left_dup, sub2);
  int16x8_t sum3 = vaddq_s16(left_dup, sub3);
  sum0 = vminq_s16(sum0, max);
  sum1 = vminq_s16(sum1, max);
  sum2 = vminq_s16(sum2, max);
  sum3 = vminq_s16(sum3, max);
  row0 = vqshluq_n_s16(sum0, 0);
  row1 = vqshluq_n_s16(sum1, 0);
  row2 = vqshluq_n_s16(sum2, 0);
  row3 = vqshluq_n_s16(sum3, 0);
  vst1q_u16(*dst, row0);
  *dst += 8;
  vst1q_u16(*dst, row1);
  *dst += 8;
  vst1q_u16(*dst, row2);
  *dst += 8;
  vst1q_u16(*dst, row3);
  *dst += stride - 24;
}

void vpx_highbd_tm_predictor_32x32_neon(uint16_t *dst, ptrdiff_t stride,
                                        const uint16_t *above,
                                        const uint16_t *left, int bd) {
  const int16x8_t max = vmovq_n_s16((1 << bd) - 1);
  const int16x8_t top_left = vld1q_dup_s16((const int16_t *)(above - 1));
  const int16x8_t above0 = vld1q_s16((const int16_t *)above);
  const int16x8_t above1 = vld1q_s16((const int16_t *)(above + 8));
  const int16x8_t above2 = vld1q_s16((const int16_t *)(above + 16));
  const int16x8_t above3 = vld1q_s16((const int16_t *)(above + 24));
  const int16x8_t sub0 = vsubq_s16(above0, top_left);
  const int16x8_t sub1 = vsubq_s16(above1, top_left);
  const int16x8_t sub2 = vsubq_s16(above2, top_left);
  const int16x8_t sub3 = vsubq_s16(above3, top_left);
  int16x8_t left_dup;
  int i, j;

  for (i = 0; i < 4; i++, left += 8) {
    const int16x8_t left_s16q = vld1q_s16((const int16_t *)left);
    int16x4_t left_s16d = vget_low_s16(left_s16q);
    for (j = 0; j < 2; j++, left_s16d = vget_high_s16(left_s16q)) {
      left_dup = vdupq_lane_s16(left_s16d, 0);
      tm_32_kernel(&dst, stride, left_dup, sub0, sub1, sub2, sub3, max);
      left_dup = vdupq_lane_s16(left_s16d, 1);
      tm_32_kernel(&dst, stride, left_dup, sub0, sub1, sub2, sub3, max);
      left_dup = vdupq_lane_s16(left_s16d, 2);
      tm_32_kernel(&dst, stride, left_dup, sub0, sub1, sub2, sub3, max);
      left_dup = vdupq_lane_s16(left_s16d, 3);
      tm_32_kernel(&dst, stride, left_dup, sub0, sub1, sub2, sub3, max);
    }
  }
}