/*
 *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/arm/idct_neon.h"
#include "vpx_dsp/arm/mem_neon.h"
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/txfm_common.h"
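
// Helpers to load eight rows of a 32x32 coefficient block: consecutive rows
// are 32 coefficients apart, and load_tran_low_to_s16q/load_tran_low_to_s16d
// (mem_neon.h) return the tran_low_t values as int16x8_t / int16x4_t vectors.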
static INLINE void load_8x8_s16(const tran_low_t *input, int16x8_t *const in0,
                                int16x8_t *const in1, int16x8_t *const in2,
                                int16x8_t *const in3, int16x8_t *const in4,
                                int16x8_t *const in5, int16x8_t *const in6,
                                int16x8_t *const in7) {
  *in0 = load_tran_low_to_s16q(input);
  input += 32;
  *in1 = load_tran_low_to_s16q(input);
  input += 32;
  *in2 = load_tran_low_to_s16q(input);
  input += 32;
  *in3 = load_tran_low_to_s16q(input);
  input += 32;
  *in4 = load_tran_low_to_s16q(input);
  input += 32;
  *in5 = load_tran_low_to_s16q(input);
  input += 32;
  *in6 = load_tran_low_to_s16q(input);
  input += 32;
  *in7 = load_tran_low_to_s16q(input);
}
static INLINE void load_4x8_s16(const tran_low_t *input, int16x4_t *const in0,
                                int16x4_t *const in1, int16x4_t *const in2,
                                int16x4_t *const in3, int16x4_t *const in4,
                                int16x4_t *const in5, int16x4_t *const in6,
                                int16x4_t *const in7) {
  *in0 = load_tran_low_to_s16d(input);
  input += 32;
  *in1 = load_tran_low_to_s16d(input);
  input += 32;
  *in2 = load_tran_low_to_s16d(input);
  input += 32;
  *in3 = load_tran_low_to_s16d(input);
  input += 32;
  *in4 = load_tran_low_to_s16d(input);
  input += 32;
  *in5 = load_tran_low_to_s16d(input);
  input += 32;
  *in6 = load_tran_low_to_s16d(input);
  input += 32;
  *in7 = load_tran_low_to_s16d(input);
}
// Only for the first pass of the _135_ variant. Since it only uses values from
// the top left 16x16 it can safely assume all the remaining values are 0 and
// skip an awful lot of calculations. In fact, only the first 12 columns make
// the cut. None of the elements in the 13th, 14th, 15th or 16th columns are
// used so it skips any calls to input[12|13|14|15] too.
// In C this does a single row of 32 for each call. Here it transposes the top
// left 12x8 to allow using SIMD.

// vp9/common/vp9_scan.c:vp9_default_iscan_32x32 arranges the first 135 non-zero
// coefficients as follows:
//      0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
//  0   0   2   5  10  17  25  38  47  62  83 101 121
//  1   1   4   8  15  22  30  45  58  74  92 112 133
//  2   3   7  12  18  28  36  52  64  82 102 118
//  3   6  11  16  23  31  43  60  73  90 109 126
//  4   9  14  19  29  37  50  65  78  98 116 134
//  5  13  20  26  35  44  54  72  85 105 123
//  6  21  27  33  42  53  63  80  94 113 132
//  7  24  32  39  48  57  71  88 104 120
//  8  34  40  46  56  68  81  96 111 130
//  9  41  49  55  67  77  91 107 124
// 10  51  59  66  76  89  99 119 131
// 11  61  69  75  87 100 114 129
// 12  70  79  86  97 108 122
// 13  84  93 103 110 125
// 14  98 106 115 127
// 15 117 128
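
// The multiply_*_shift_and_narrow_s16() helpers from idct_neon.h implement the
// dct_const_round_shift() step of the C reference idct: the product of a
// coefficient and a cospi constant is rounded and shifted right by
// DCT_CONST_BITS (14) before being narrowed back to 16 bits. As a rough scalar
// sketch (the NEON helpers may fold the rounding and shift into a single
// saturating rounding multiply; see idct_neon.h for the exact implementation):
//   int32_t t = (int32_t)a * (int32_t)a_const;
//   int16_t r = (int16_t)((t + (1 << 13)) >> 14);  // dct_const_round_shift()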
void vpx_idct32_12_neon(const tran_low_t *const input, int16_t *output) {
  int16x4_t tmp[8];
  int16x8_t in[12], s1[32], s2[32], s3[32], s4[32], s5[32], s6[32], s7[32];

  load_8x8_s16(input, &in[0], &in[1], &in[2], &in[3], &in[4], &in[5], &in[6],
               &in[7]);
  transpose_s16_8x8(&in[0], &in[1], &in[2], &in[3], &in[4], &in[5], &in[6],
                    &in[7]);

  load_4x8_s16(input + 8, &tmp[0], &tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5],
               &tmp[6], &tmp[7]);
  transpose_s16_4x8(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6],
                    tmp[7], &in[8], &in[9], &in[10], &in[11]);
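
  // in[0]..in[11] now hold the transposed top-left 12x8 block: each vector
  // carries one coefficient position across the 8 loaded rows, so the
  // butterfly network below transforms 8 rows at a time.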
  // stage 1
  s1[16] = multiply_shift_and_narrow_s16(in[1], cospi_31_64);
  s1[31] = multiply_shift_and_narrow_s16(in[1], cospi_1_64);

  s1[18] = multiply_shift_and_narrow_s16(in[9], cospi_23_64);
  s1[29] = multiply_shift_and_narrow_s16(in[9], cospi_9_64);

  s1[19] = multiply_shift_and_narrow_s16(in[7], -cospi_25_64);
  s1[28] = multiply_shift_and_narrow_s16(in[7], cospi_7_64);

  s1[20] = multiply_shift_and_narrow_s16(in[5], cospi_27_64);
  s1[27] = multiply_shift_and_narrow_s16(in[5], cospi_5_64);

  s1[21] = multiply_shift_and_narrow_s16(in[11], -cospi_21_64);
  s1[26] = multiply_shift_and_narrow_s16(in[11], cospi_11_64);

  s1[23] = multiply_shift_and_narrow_s16(in[3], -cospi_29_64);
  s1[24] = multiply_shift_and_narrow_s16(in[3], cospi_3_64);
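
  // The 16-wide variant below also computes s1[17]/s1[30] (from row 15) and
  // s1[22]/s1[25] (from row 13) in stage 1; those rows are zero here, so the
  // corresponding products are skipped.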
  // stage 2
  s2[8] = multiply_shift_and_narrow_s16(in[2], cospi_30_64);
  s2[15] = multiply_shift_and_narrow_s16(in[2], cospi_2_64);

  s2[10] = multiply_shift_and_narrow_s16(in[10], cospi_22_64);
  s2[13] = multiply_shift_and_narrow_s16(in[10], cospi_10_64);

  s2[11] = multiply_shift_and_narrow_s16(in[6], -cospi_26_64);
  s2[12] = multiply_shift_and_narrow_s16(in[6], cospi_6_64);

  s2[18] = vsubq_s16(s1[19], s1[18]);
  s2[19] = vaddq_s16(s1[18], s1[19]);
  s2[20] = vaddq_s16(s1[20], s1[21]);
  s2[21] = vsubq_s16(s1[20], s1[21]);
  s2[26] = vsubq_s16(s1[27], s1[26]);
  s2[27] = vaddq_s16(s1[26], s1[27]);
  s2[28] = vaddq_s16(s1[28], s1[29]);
  s2[29] = vsubq_s16(s1[28], s1[29]);
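
  // s2[9]/s2[14] (row 14) are skipped, and the stage 2 butterflies for
  // 16/17, 22/23, 24/25 and 30/31 collapse because one operand is zero;
  // later stages read s1[16], s1[23], s1[24] and s1[31] directly instead.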
  // stage 3
  s3[4] = multiply_shift_and_narrow_s16(in[4], cospi_28_64);
  s3[7] = multiply_shift_and_narrow_s16(in[4], cospi_4_64);

  s3[10] = vsubq_s16(s2[11], s2[10]);
  s3[11] = vaddq_s16(s2[10], s2[11]);
  s3[12] = vaddq_s16(s2[12], s2[13]);
  s3[13] = vsubq_s16(s2[12], s2[13]);

  s3[17] = multiply_accumulate_shift_and_narrow_s16(s1[16], -cospi_4_64, s1[31],
                                                    cospi_28_64);
  s3[30] = multiply_accumulate_shift_and_narrow_s16(s1[16], cospi_28_64, s1[31],
                                                    cospi_4_64);

  s3[18] = multiply_accumulate_shift_and_narrow_s16(s2[18], -cospi_28_64,
                                                    s2[29], -cospi_4_64);
  s3[29] = multiply_accumulate_shift_and_narrow_s16(s2[18], -cospi_4_64, s2[29],
                                                    cospi_28_64);

  s3[21] = multiply_accumulate_shift_and_narrow_s16(s2[21], -cospi_20_64,
                                                    s2[26], cospi_12_64);
  s3[26] = multiply_accumulate_shift_and_narrow_s16(s2[21], cospi_12_64, s2[26],
                                                    cospi_20_64);

  s3[22] = multiply_accumulate_shift_and_narrow_s16(s1[23], -cospi_12_64,
                                                    s1[24], -cospi_20_64);
  s3[25] = multiply_accumulate_shift_and_narrow_s16(s1[23], -cospi_20_64,
                                                    s1[24], cospi_12_64);
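
  // s3[5]/s3[6] (row 12) and the 8/9 and 14/15 butterflies are skipped for the
  // same reason; s2[8] and s2[15] feed the later stages directly.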
  // stage 4
  s4[0] = multiply_shift_and_narrow_s16(in[0], cospi_16_64);
  s4[2] = multiply_shift_and_narrow_s16(in[8], cospi_24_64);
  s4[3] = multiply_shift_and_narrow_s16(in[8], cospi_8_64);

  s4[9] = multiply_accumulate_shift_and_narrow_s16(s2[8], -cospi_8_64, s2[15],
                                                   cospi_24_64);
  s4[14] = multiply_accumulate_shift_and_narrow_s16(s2[8], cospi_24_64, s2[15],
                                                    cospi_8_64);

  s4[10] = multiply_accumulate_shift_and_narrow_s16(s3[10], -cospi_24_64,
                                                    s3[13], -cospi_8_64);
  s4[13] = multiply_accumulate_shift_and_narrow_s16(s3[10], -cospi_8_64, s3[13],
                                                    cospi_24_64);

  s4[16] = vaddq_s16(s1[16], s2[19]);
  s4[17] = vaddq_s16(s3[17], s3[18]);
  s4[18] = vsubq_s16(s3[17], s3[18]);
  s4[19] = vsubq_s16(s1[16], s2[19]);
  s4[20] = vsubq_s16(s1[23], s2[20]);
  s4[21] = vsubq_s16(s3[22], s3[21]);
  s4[22] = vaddq_s16(s3[21], s3[22]);
  s4[23] = vaddq_s16(s2[20], s1[23]);
  s4[24] = vaddq_s16(s1[24], s2[27]);
  s4[25] = vaddq_s16(s3[25], s3[26]);
  s4[26] = vsubq_s16(s3[25], s3[26]);
  s4[27] = vsubq_s16(s1[24], s2[27]);
  s4[28] = vsubq_s16(s1[31], s2[28]);
  s4[29] = vsubq_s16(s3[30], s3[29]);
  s4[30] = vaddq_s16(s3[29], s3[30]);
  s4[31] = vaddq_s16(s2[28], s1[31]);

  // stage 5
  s5[0] = vaddq_s16(s4[0], s4[3]);
  s5[1] = vaddq_s16(s4[0], s4[2]);
  s5[2] = vsubq_s16(s4[0], s4[2]);
  s5[3] = vsubq_s16(s4[0], s4[3]);

  s5[5] = sub_multiply_shift_and_narrow_s16(s3[7], s3[4], cospi_16_64);
  s5[6] = add_multiply_shift_and_narrow_s16(s3[4], s3[7], cospi_16_64);

  s5[8] = vaddq_s16(s2[8], s3[11]);
  s5[9] = vaddq_s16(s4[9], s4[10]);
  s5[10] = vsubq_s16(s4[9], s4[10]);
  s5[11] = vsubq_s16(s2[8], s3[11]);
  s5[12] = vsubq_s16(s2[15], s3[12]);
  s5[13] = vsubq_s16(s4[14], s4[13]);
  s5[14] = vaddq_s16(s4[13], s4[14]);
  s5[15] = vaddq_s16(s2[15], s3[12]);

  s5[18] = multiply_accumulate_shift_and_narrow_s16(s4[18], -cospi_8_64, s4[29],
                                                    cospi_24_64);
  s5[29] = multiply_accumulate_shift_and_narrow_s16(s4[18], cospi_24_64, s4[29],
                                                    cospi_8_64);

  s5[19] = multiply_accumulate_shift_and_narrow_s16(s4[19], -cospi_8_64, s4[28],
                                                    cospi_24_64);
  s5[28] = multiply_accumulate_shift_and_narrow_s16(s4[19], cospi_24_64, s4[28],
                                                    cospi_8_64);

  s5[20] = multiply_accumulate_shift_and_narrow_s16(s4[20], -cospi_24_64,
                                                    s4[27], -cospi_8_64);
  s5[27] = multiply_accumulate_shift_and_narrow_s16(s4[20], -cospi_8_64, s4[27],
                                                    cospi_24_64);

  s5[21] = multiply_accumulate_shift_and_narrow_s16(s4[21], -cospi_24_64,
                                                    s4[26], -cospi_8_64);
  s5[26] = multiply_accumulate_shift_and_narrow_s16(s4[21], -cospi_8_64, s4[26],
                                                    cospi_24_64);

  // stage 6
  s6[0] = vaddq_s16(s5[0], s3[7]);
  s6[1] = vaddq_s16(s5[1], s5[6]);
  s6[2] = vaddq_s16(s5[2], s5[5]);
  s6[3] = vaddq_s16(s5[3], s3[4]);
  s6[4] = vsubq_s16(s5[3], s3[4]);
  s6[5] = vsubq_s16(s5[2], s5[5]);
  s6[6] = vsubq_s16(s5[1], s5[6]);
  s6[7] = vsubq_s16(s5[0], s3[7]);

  s6[10] = sub_multiply_shift_and_narrow_s16(s5[13], s5[10], cospi_16_64);
  s6[13] = add_multiply_shift_and_narrow_s16(s5[10], s5[13], cospi_16_64);

  s6[11] = sub_multiply_shift_and_narrow_s16(s5[12], s5[11], cospi_16_64);
  s6[12] = add_multiply_shift_and_narrow_s16(s5[11], s5[12], cospi_16_64);

  s6[16] = vaddq_s16(s4[16], s4[23]);
  s6[17] = vaddq_s16(s4[17], s4[22]);
  s6[18] = vaddq_s16(s5[18], s5[21]);
  s6[19] = vaddq_s16(s5[19], s5[20]);
  s6[20] = vsubq_s16(s5[19], s5[20]);
  s6[21] = vsubq_s16(s5[18], s5[21]);
  s6[22] = vsubq_s16(s4[17], s4[22]);
  s6[23] = vsubq_s16(s4[16], s4[23]);
  s6[24] = vsubq_s16(s4[31], s4[24]);
  s6[25] = vsubq_s16(s4[30], s4[25]);
  s6[26] = vsubq_s16(s5[29], s5[26]);
  s6[27] = vsubq_s16(s5[28], s5[27]);
  s6[28] = vaddq_s16(s5[27], s5[28]);
  s6[29] = vaddq_s16(s5[26], s5[29]);
  s6[30] = vaddq_s16(s4[25], s4[30]);
  s6[31] = vaddq_s16(s4[24], s4[31]);

  // stage 7
  s7[0] = vaddq_s16(s6[0], s5[15]);
  s7[1] = vaddq_s16(s6[1], s5[14]);
  s7[2] = vaddq_s16(s6[2], s6[13]);
  s7[3] = vaddq_s16(s6[3], s6[12]);
  s7[4] = vaddq_s16(s6[4], s6[11]);
  s7[5] = vaddq_s16(s6[5], s6[10]);
  s7[6] = vaddq_s16(s6[6], s5[9]);
  s7[7] = vaddq_s16(s6[7], s5[8]);
  s7[8] = vsubq_s16(s6[7], s5[8]);
  s7[9] = vsubq_s16(s6[6], s5[9]);
  s7[10] = vsubq_s16(s6[5], s6[10]);
  s7[11] = vsubq_s16(s6[4], s6[11]);
  s7[12] = vsubq_s16(s6[3], s6[12]);
  s7[13] = vsubq_s16(s6[2], s6[13]);
  s7[14] = vsubq_s16(s6[1], s5[14]);
  s7[15] = vsubq_s16(s6[0], s5[15]);

  s7[20] = sub_multiply_shift_and_narrow_s16(s6[27], s6[20], cospi_16_64);
  s7[27] = add_multiply_shift_and_narrow_s16(s6[20], s6[27], cospi_16_64);

  s7[21] = sub_multiply_shift_and_narrow_s16(s6[26], s6[21], cospi_16_64);
  s7[26] = add_multiply_shift_and_narrow_s16(s6[21], s6[26], cospi_16_64);

  s7[22] = sub_multiply_shift_and_narrow_s16(s6[25], s6[22], cospi_16_64);
  s7[25] = add_multiply_shift_and_narrow_s16(s6[22], s6[25], cospi_16_64);

  s7[23] = sub_multiply_shift_and_narrow_s16(s6[24], s6[23], cospi_16_64);
  s7[24] = add_multiply_shift_and_narrow_s16(s6[23], s6[24], cospi_16_64);
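
  // The 32 results are stored with a pitch of 16 int16_t values so the two
  // first-pass calls interleave into a 32x16 intermediate buffer, which the
  // second pass (vpx_idct32_16_neon) reloads with a stride of 16.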
  // final stage
  vst1q_s16(output, vaddq_s16(s7[0], s6[31]));
  output += 16;
  vst1q_s16(output, vaddq_s16(s7[1], s6[30]));
  output += 16;
  vst1q_s16(output, vaddq_s16(s7[2], s6[29]));
  output += 16;
  vst1q_s16(output, vaddq_s16(s7[3], s6[28]));
  output += 16;
  vst1q_s16(output, vaddq_s16(s7[4], s7[27]));
  output += 16;
  vst1q_s16(output, vaddq_s16(s7[5], s7[26]));
  output += 16;
  vst1q_s16(output, vaddq_s16(s7[6], s7[25]));
  output += 16;
  vst1q_s16(output, vaddq_s16(s7[7], s7[24]));
  output += 16;
  vst1q_s16(output, vaddq_s16(s7[8], s7[23]));
  output += 16;
  vst1q_s16(output, vaddq_s16(s7[9], s7[22]));
  output += 16;
  vst1q_s16(output, vaddq_s16(s7[10], s7[21]));
  output += 16;
  vst1q_s16(output, vaddq_s16(s7[11], s7[20]));
  output += 16;
  vst1q_s16(output, vaddq_s16(s7[12], s6[19]));
  output += 16;
  vst1q_s16(output, vaddq_s16(s7[13], s6[18]));
  output += 16;
  vst1q_s16(output, vaddq_s16(s7[14], s6[17]));
  output += 16;
  vst1q_s16(output, vaddq_s16(s7[15], s6[16]));
  output += 16;
  vst1q_s16(output, vsubq_s16(s7[15], s6[16]));
  output += 16;
  vst1q_s16(output, vsubq_s16(s7[14], s6[17]));
  output += 16;
  vst1q_s16(output, vsubq_s16(s7[13], s6[18]));
  output += 16;
  vst1q_s16(output, vsubq_s16(s7[12], s6[19]));
  output += 16;
  vst1q_s16(output, vsubq_s16(s7[11], s7[20]));
  output += 16;
  vst1q_s16(output, vsubq_s16(s7[10], s7[21]));
  output += 16;
  vst1q_s16(output, vsubq_s16(s7[9], s7[22]));
  output += 16;
  vst1q_s16(output, vsubq_s16(s7[8], s7[23]));
  output += 16;
  vst1q_s16(output, vsubq_s16(s7[7], s7[24]));
  output += 16;
  vst1q_s16(output, vsubq_s16(s7[6], s7[25]));
  output += 16;
  vst1q_s16(output, vsubq_s16(s7[5], s7[26]));
  output += 16;
  vst1q_s16(output, vsubq_s16(s7[4], s7[27]));
  output += 16;
  vst1q_s16(output, vsubq_s16(s7[3], s6[28]));
  output += 16;
  vst1q_s16(output, vsubq_s16(s7[2], s6[29]));
  output += 16;
  vst1q_s16(output, vsubq_s16(s7[1], s6[30]));
  output += 16;
  vst1q_s16(output, vsubq_s16(s7[0], s6[31]));
}
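
// Second pass: transforms 8 columns of the 32x16 intermediate buffer produced
// by vpx_idct32_12_neon per call, using all 16 rows, and adds the result to
// the destination. When highbd_flag is set, the result is written through the
// high-bitdepth store path (highbd_add_and_store_bd8).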
void vpx_idct32_16_neon(const int16_t *const input, void *const output,
                        const int stride, const int highbd_flag) {
  int16x8_t in[16], s1[32], s2[32], s3[32], s4[32], s5[32], s6[32], s7[32],
      out[32];

  load_and_transpose_s16_8x8(input, 16, &in[0], &in[1], &in[2], &in[3], &in[4],
                             &in[5], &in[6], &in[7]);

  load_and_transpose_s16_8x8(input + 8, 16, &in[8], &in[9], &in[10], &in[11],
                             &in[12], &in[13], &in[14], &in[15]);
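
  // As in the first pass, the loaded 8x16 block is transposed so that
  // in[0]..in[15] each hold one coefficient position across the 8 rows
  // being processed.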
  // stage 1
  s1[16] = multiply_shift_and_narrow_s16(in[1], cospi_31_64);
  s1[31] = multiply_shift_and_narrow_s16(in[1], cospi_1_64);

  s1[17] = multiply_shift_and_narrow_s16(in[15], -cospi_17_64);
  s1[30] = multiply_shift_and_narrow_s16(in[15], cospi_15_64);

  s1[18] = multiply_shift_and_narrow_s16(in[9], cospi_23_64);
  s1[29] = multiply_shift_and_narrow_s16(in[9], cospi_9_64);

  s1[19] = multiply_shift_and_narrow_s16(in[7], -cospi_25_64);
  s1[28] = multiply_shift_and_narrow_s16(in[7], cospi_7_64);

  s1[20] = multiply_shift_and_narrow_s16(in[5], cospi_27_64);
  s1[27] = multiply_shift_and_narrow_s16(in[5], cospi_5_64);

  s1[21] = multiply_shift_and_narrow_s16(in[11], -cospi_21_64);
  s1[26] = multiply_shift_and_narrow_s16(in[11], cospi_11_64);

  s1[22] = multiply_shift_and_narrow_s16(in[13], cospi_19_64);
  s1[25] = multiply_shift_and_narrow_s16(in[13], cospi_13_64);

  s1[23] = multiply_shift_and_narrow_s16(in[3], -cospi_29_64);
  s1[24] = multiply_shift_and_narrow_s16(in[3], cospi_3_64);

  // stage 2
  s2[8] = multiply_shift_and_narrow_s16(in[2], cospi_30_64);
  s2[15] = multiply_shift_and_narrow_s16(in[2], cospi_2_64);

  s2[9] = multiply_shift_and_narrow_s16(in[14], -cospi_18_64);
  s2[14] = multiply_shift_and_narrow_s16(in[14], cospi_14_64);

  s2[10] = multiply_shift_and_narrow_s16(in[10], cospi_22_64);
  s2[13] = multiply_shift_and_narrow_s16(in[10], cospi_10_64);

  s2[11] = multiply_shift_and_narrow_s16(in[6], -cospi_26_64);
  s2[12] = multiply_shift_and_narrow_s16(in[6], cospi_6_64);

  s2[16] = vaddq_s16(s1[16], s1[17]);
  s2[17] = vsubq_s16(s1[16], s1[17]);
  s2[18] = vsubq_s16(s1[19], s1[18]);
  s2[19] = vaddq_s16(s1[18], s1[19]);
  s2[20] = vaddq_s16(s1[20], s1[21]);
  s2[21] = vsubq_s16(s1[20], s1[21]);
  s2[22] = vsubq_s16(s1[23], s1[22]);
  s2[23] = vaddq_s16(s1[22], s1[23]);
  s2[24] = vaddq_s16(s1[24], s1[25]);
  s2[25] = vsubq_s16(s1[24], s1[25]);
  s2[26] = vsubq_s16(s1[27], s1[26]);
  s2[27] = vaddq_s16(s1[26], s1[27]);
  s2[28] = vaddq_s16(s1[28], s1[29]);
  s2[29] = vsubq_s16(s1[28], s1[29]);
  s2[30] = vsubq_s16(s1[31], s1[30]);
  s2[31] = vaddq_s16(s1[30], s1[31]);

  // stage 3
  s3[4] = multiply_shift_and_narrow_s16(in[4], cospi_28_64);
  s3[7] = multiply_shift_and_narrow_s16(in[4], cospi_4_64);

  s3[5] = multiply_shift_and_narrow_s16(in[12], -cospi_20_64);
  s3[6] = multiply_shift_and_narrow_s16(in[12], cospi_12_64);

  s3[8] = vaddq_s16(s2[8], s2[9]);
  s3[9] = vsubq_s16(s2[8], s2[9]);
  s3[10] = vsubq_s16(s2[11], s2[10]);
  s3[11] = vaddq_s16(s2[10], s2[11]);
  s3[12] = vaddq_s16(s2[12], s2[13]);
  s3[13] = vsubq_s16(s2[12], s2[13]);
  s3[14] = vsubq_s16(s2[15], s2[14]);
  s3[15] = vaddq_s16(s2[14], s2[15]);

  s3[17] = multiply_accumulate_shift_and_narrow_s16(s2[17], -cospi_4_64, s2[30],
                                                    cospi_28_64);
  s3[30] = multiply_accumulate_shift_and_narrow_s16(s2[17], cospi_28_64, s2[30],
                                                    cospi_4_64);

  s3[18] = multiply_accumulate_shift_and_narrow_s16(s2[18], -cospi_28_64,
                                                    s2[29], -cospi_4_64);
  s3[29] = multiply_accumulate_shift_and_narrow_s16(s2[18], -cospi_4_64, s2[29],
                                                    cospi_28_64);

  s3[21] = multiply_accumulate_shift_and_narrow_s16(s2[21], -cospi_20_64,
                                                    s2[26], cospi_12_64);
  s3[26] = multiply_accumulate_shift_and_narrow_s16(s2[21], cospi_12_64, s2[26],
                                                    cospi_20_64);

  s3[22] = multiply_accumulate_shift_and_narrow_s16(s2[22], -cospi_12_64,
                                                    s2[25], -cospi_20_64);
  s3[25] = multiply_accumulate_shift_and_narrow_s16(s2[22], -cospi_20_64,
                                                    s2[25], cospi_12_64);

  // stage 4
  s4[0] = multiply_shift_and_narrow_s16(in[0], cospi_16_64);
  s4[2] = multiply_shift_and_narrow_s16(in[8], cospi_24_64);
  s4[3] = multiply_shift_and_narrow_s16(in[8], cospi_8_64);

  s4[4] = vaddq_s16(s3[4], s3[5]);
  s4[5] = vsubq_s16(s3[4], s3[5]);
  s4[6] = vsubq_s16(s3[7], s3[6]);
  s4[7] = vaddq_s16(s3[6], s3[7]);

  s4[9] = multiply_accumulate_shift_and_narrow_s16(s3[9], -cospi_8_64, s3[14],
                                                   cospi_24_64);
  s4[14] = multiply_accumulate_shift_and_narrow_s16(s3[9], cospi_24_64, s3[14],
                                                    cospi_8_64);

  s4[10] = multiply_accumulate_shift_and_narrow_s16(s3[10], -cospi_24_64,
                                                    s3[13], -cospi_8_64);
  s4[13] = multiply_accumulate_shift_and_narrow_s16(s3[10], -cospi_8_64, s3[13],
                                                    cospi_24_64);

  s4[16] = vaddq_s16(s2[16], s2[19]);
  s4[17] = vaddq_s16(s3[17], s3[18]);
  s4[18] = vsubq_s16(s3[17], s3[18]);
  s4[19] = vsubq_s16(s2[16], s2[19]);
  s4[20] = vsubq_s16(s2[23], s2[20]);
  s4[21] = vsubq_s16(s3[22], s3[21]);
  s4[22] = vaddq_s16(s3[21], s3[22]);
  s4[23] = vaddq_s16(s2[20], s2[23]);
  s4[24] = vaddq_s16(s2[24], s2[27]);
  s4[25] = vaddq_s16(s3[25], s3[26]);
  s4[26] = vsubq_s16(s3[25], s3[26]);
  s4[27] = vsubq_s16(s2[24], s2[27]);
  s4[28] = vsubq_s16(s2[31], s2[28]);
  s4[29] = vsubq_s16(s3[30], s3[29]);
  s4[30] = vaddq_s16(s3[29], s3[30]);
  s4[31] = vaddq_s16(s2[28], s2[31]);

  // stage 5
  s5[0] = vaddq_s16(s4[0], s4[3]);
  s5[1] = vaddq_s16(s4[0], s4[2]);
  s5[2] = vsubq_s16(s4[0], s4[2]);
  s5[3] = vsubq_s16(s4[0], s4[3]);

  s5[5] = sub_multiply_shift_and_narrow_s16(s4[6], s4[5], cospi_16_64);
  s5[6] = add_multiply_shift_and_narrow_s16(s4[5], s4[6], cospi_16_64);

  s5[8] = vaddq_s16(s3[8], s3[11]);
  s5[9] = vaddq_s16(s4[9], s4[10]);
  s5[10] = vsubq_s16(s4[9], s4[10]);
  s5[11] = vsubq_s16(s3[8], s3[11]);
  s5[12] = vsubq_s16(s3[15], s3[12]);
  s5[13] = vsubq_s16(s4[14], s4[13]);
  s5[14] = vaddq_s16(s4[13], s4[14]);
  s5[15] = vaddq_s16(s3[15], s3[12]);

  s5[18] = multiply_accumulate_shift_and_narrow_s16(s4[18], -cospi_8_64, s4[29],
                                                    cospi_24_64);
  s5[29] = multiply_accumulate_shift_and_narrow_s16(s4[18], cospi_24_64, s4[29],
                                                    cospi_8_64);

  s5[19] = multiply_accumulate_shift_and_narrow_s16(s4[19], -cospi_8_64, s4[28],
                                                    cospi_24_64);
  s5[28] = multiply_accumulate_shift_and_narrow_s16(s4[19], cospi_24_64, s4[28],
                                                    cospi_8_64);

  s5[20] = multiply_accumulate_shift_and_narrow_s16(s4[20], -cospi_24_64,
                                                    s4[27], -cospi_8_64);
  s5[27] = multiply_accumulate_shift_and_narrow_s16(s4[20], -cospi_8_64, s4[27],
                                                    cospi_24_64);

  s5[21] = multiply_accumulate_shift_and_narrow_s16(s4[21], -cospi_24_64,
                                                    s4[26], -cospi_8_64);
  s5[26] = multiply_accumulate_shift_and_narrow_s16(s4[21], -cospi_8_64, s4[26],
                                                    cospi_24_64);

  // stage 6
  s6[0] = vaddq_s16(s5[0], s4[7]);
  s6[1] = vaddq_s16(s5[1], s5[6]);
  s6[2] = vaddq_s16(s5[2], s5[5]);
  s6[3] = vaddq_s16(s5[3], s4[4]);
  s6[4] = vsubq_s16(s5[3], s4[4]);
  s6[5] = vsubq_s16(s5[2], s5[5]);
  s6[6] = vsubq_s16(s5[1], s5[6]);
  s6[7] = vsubq_s16(s5[0], s4[7]);

  s6[10] = sub_multiply_shift_and_narrow_s16(s5[13], s5[10], cospi_16_64);
  s6[13] = add_multiply_shift_and_narrow_s16(s5[10], s5[13], cospi_16_64);

  s6[11] = sub_multiply_shift_and_narrow_s16(s5[12], s5[11], cospi_16_64);
  s6[12] = add_multiply_shift_and_narrow_s16(s5[11], s5[12], cospi_16_64);

  s6[16] = vaddq_s16(s4[16], s4[23]);
  s6[17] = vaddq_s16(s4[17], s4[22]);
  s6[18] = vaddq_s16(s5[18], s5[21]);
  s6[19] = vaddq_s16(s5[19], s5[20]);
  s6[20] = vsubq_s16(s5[19], s5[20]);
  s6[21] = vsubq_s16(s5[18], s5[21]);
  s6[22] = vsubq_s16(s4[17], s4[22]);
  s6[23] = vsubq_s16(s4[16], s4[23]);
  s6[24] = vsubq_s16(s4[31], s4[24]);
  s6[25] = vsubq_s16(s4[30], s4[25]);
  s6[26] = vsubq_s16(s5[29], s5[26]);
  s6[27] = vsubq_s16(s5[28], s5[27]);
  s6[28] = vaddq_s16(s5[27], s5[28]);
  s6[29] = vaddq_s16(s5[26], s5[29]);
  s6[30] = vaddq_s16(s4[25], s4[30]);
  s6[31] = vaddq_s16(s4[24], s4[31]);

  // stage 7
  s7[0] = vaddq_s16(s6[0], s5[15]);
  s7[1] = vaddq_s16(s6[1], s5[14]);
  s7[2] = vaddq_s16(s6[2], s6[13]);
  s7[3] = vaddq_s16(s6[3], s6[12]);
  s7[4] = vaddq_s16(s6[4], s6[11]);
  s7[5] = vaddq_s16(s6[5], s6[10]);
  s7[6] = vaddq_s16(s6[6], s5[9]);
  s7[7] = vaddq_s16(s6[7], s5[8]);
  s7[8] = vsubq_s16(s6[7], s5[8]);
  s7[9] = vsubq_s16(s6[6], s5[9]);
  s7[10] = vsubq_s16(s6[5], s6[10]);
  s7[11] = vsubq_s16(s6[4], s6[11]);
  s7[12] = vsubq_s16(s6[3], s6[12]);
  s7[13] = vsubq_s16(s6[2], s6[13]);
  s7[14] = vsubq_s16(s6[1], s5[14]);
  s7[15] = vsubq_s16(s6[0], s5[15]);

  s7[20] = sub_multiply_shift_and_narrow_s16(s6[27], s6[20], cospi_16_64);
  s7[27] = add_multiply_shift_and_narrow_s16(s6[20], s6[27], cospi_16_64);

  s7[21] = sub_multiply_shift_and_narrow_s16(s6[26], s6[21], cospi_16_64);
  s7[26] = add_multiply_shift_and_narrow_s16(s6[21], s6[26], cospi_16_64);

  s7[22] = sub_multiply_shift_and_narrow_s16(s6[25], s6[22], cospi_16_64);
  s7[25] = add_multiply_shift_and_narrow_s16(s6[22], s6[25], cospi_16_64);

  s7[23] = sub_multiply_shift_and_narrow_s16(s6[24], s6[23], cospi_16_64);
  s7[24] = add_multiply_shift_and_narrow_s16(s6[23], s6[24], cospi_16_64);

  // final stage
  out[0] = final_add(s7[0], s6[31]);
  out[1] = final_add(s7[1], s6[30]);
  out[2] = final_add(s7[2], s6[29]);
  out[3] = final_add(s7[3], s6[28]);
  out[4] = final_add(s7[4], s7[27]);
  out[5] = final_add(s7[5], s7[26]);
  out[6] = final_add(s7[6], s7[25]);
  out[7] = final_add(s7[7], s7[24]);
  out[8] = final_add(s7[8], s7[23]);
  out[9] = final_add(s7[9], s7[22]);
  out[10] = final_add(s7[10], s7[21]);
  out[11] = final_add(s7[11], s7[20]);
  out[12] = final_add(s7[12], s6[19]);
  out[13] = final_add(s7[13], s6[18]);
  out[14] = final_add(s7[14], s6[17]);
  out[15] = final_add(s7[15], s6[16]);
  out[16] = final_sub(s7[15], s6[16]);
  out[17] = final_sub(s7[14], s6[17]);
  out[18] = final_sub(s7[13], s6[18]);
  out[19] = final_sub(s7[12], s6[19]);
  out[20] = final_sub(s7[11], s7[20]);
  out[21] = final_sub(s7[10], s7[21]);
  out[22] = final_sub(s7[9], s7[22]);
  out[23] = final_sub(s7[8], s7[23]);
  out[24] = final_sub(s7[7], s7[24]);
  out[25] = final_sub(s7[6], s7[25]);
  out[26] = final_sub(s7[5], s7[26]);
  out[27] = final_sub(s7[4], s7[27]);
  out[28] = final_sub(s7[3], s6[28]);
  out[29] = final_sub(s7[2], s6[29]);
  out[30] = final_sub(s7[1], s6[30]);
  out[31] = final_sub(s7[0], s6[31]);

  if (highbd_flag) {
    highbd_add_and_store_bd8(out, output, stride);
  } else {
    uint8_t *const outputT = (uint8_t *)output;
    add_and_store_u8_s16(out + 0, outputT, stride);
    add_and_store_u8_s16(out + 8, outputT + (8 * stride), stride);
    add_and_store_u8_s16(out + 16, outputT + (16 * stride), stride);
    add_and_store_u8_s16(out + 24, outputT + (24 * stride), stride);
  }
}
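
// Two-pass 32x32 inverse transform for blocks with at most 135 non-zero
// coefficients: the first pass transforms the rows of the top-left 16x16
// (two 8-row calls) into a 32x16 temp buffer, and the second pass transforms
// the columns in four 8-column slices, adding the result to dest.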
void vpx_idct32x32_135_add_neon(const tran_low_t *input, uint8_t *dest,
                                int stride) {
  int i;
  int16_t temp[32 * 16];
  int16_t *t = temp;

  vpx_idct32_12_neon(input, temp);
  vpx_idct32_12_neon(input + 32 * 8, temp + 8);

  for (i = 0; i < 32; i += 8) {
    vpx_idct32_16_neon(t, dest, stride, 0);
    t += (16 * 8);
    dest += 8;
  }
}