/*
 *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <arm_neon.h>

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/arm/idct_neon.h"
#include "vpx_dsp/arm/mem_neon.h"
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/txfm_common.h"
// Only for the first pass of the _34_ variant. Since it only uses values from
// the top left 8x8 it can safely assume all the remaining values are 0 and
// skip an awful lot of calculations. In fact, only the first 6 columns make
// the cut. None of the elements in the 7th or 8th column are used, so any
// calculations involving in[6] or in[7] are skipped as well.
// The C implementation handles a single row of 32 per call. Here the top left
// 8x8 is transposed so that 8 rows can be processed at once with SIMD.
//
// vp9/common/vp9_scan.c:vp9_default_iscan_32x32 arranges the first 34 non-zero
// coefficients as follows:
//      0  1  2  3  4  5  6  7
//   0  0  2  5 10 17 25
//   1  1  4  8 15 22 30
//   2  3  7 12 18 28
//   3  6 11 16 23 31
//   4  9 14 19 29
//   5 13 20 26
//   6 21 27 33
//   7 24 32
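//
// For reference, a scalar sketch of the helpers used below (an approximation
// of the NEON helpers in vpx_dsp/arm/idct_neon.h, which apply the same math to
// 8 lanes at a time; DCT_CONST_BITS is 14, from vpx_dsp/txfm_common.h):
//   multiply_shift_and_narrow(a, c)        ~ ROUND_POWER_OF_TWO(a * c, 14)
//   multiply_accumulate_shift_and_narrow(a, ca, b, cb)
//                                          ~ ROUND_POWER_OF_TWO(a*ca + b*cb, 14)
//   add_multiply_shift_and_narrow(a, b, c) ~ ROUND_POWER_OF_TWO((a + b)*c, 14)
//   sub_multiply_shift_and_narrow(a, b, c) ~ ROUND_POWER_OF_TWO((a - b)*c, 14)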
void vpx_idct32_6_neon(const tran_low_t *input, int16_t *output) {
  int16x8_t in[8], s1[32], s2[32], s3[32];

  in[0] = load_tran_low_to_s16q(input);
  input += 32;
  in[1] = load_tran_low_to_s16q(input);
  input += 32;
  in[2] = load_tran_low_to_s16q(input);
  input += 32;
  in[3] = load_tran_low_to_s16q(input);
  input += 32;
  in[4] = load_tran_low_to_s16q(input);
  input += 32;
  in[5] = load_tran_low_to_s16q(input);
  input += 32;
  in[6] = load_tran_low_to_s16q(input);
  input += 32;
  in[7] = load_tran_low_to_s16q(input);

  transpose_s16_8x8(&in[0], &in[1], &in[2], &in[3], &in[4], &in[5], &in[6],
                    &in[7]);

  // stage 1
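  // The higher-frequency inputs (input[8] through input[31]) are known to be
  // zero here, so each stage 1 butterfly collapses from a multiply-accumulate
  // into a single multiply, as noted below.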
  // input[1] * cospi_31_64 - input[31] * cospi_1_64 (but input[31] == 0)
  s1[16] = multiply_shift_and_narrow_s16(in[1], cospi_31_64);
  // input[1] * cospi_1_64 + input[31] * cospi_31_64 (but input[31] == 0)
  s1[31] = multiply_shift_and_narrow_s16(in[1], cospi_1_64);

  s1[20] = multiply_shift_and_narrow_s16(in[5], cospi_27_64);
  s1[27] = multiply_shift_and_narrow_s16(in[5], cospi_5_64);

  s1[23] = multiply_shift_and_narrow_s16(in[3], -cospi_29_64);
  s1[24] = multiply_shift_and_narrow_s16(in[3], cospi_3_64);

  // stage 2
  s2[8] = multiply_shift_and_narrow_s16(in[2], cospi_30_64);
  s2[15] = multiply_shift_and_narrow_s16(in[2], cospi_2_64);

  // stage 3
  s1[4] = multiply_shift_and_narrow_s16(in[4], cospi_28_64);
  s1[7] = multiply_shift_and_narrow_s16(in[4], cospi_4_64);

  s1[17] = multiply_accumulate_shift_and_narrow_s16(s1[16], -cospi_4_64,
                                                    s1[31], cospi_28_64);
  s1[30] = multiply_accumulate_shift_and_narrow_s16(s1[16], cospi_28_64,
                                                    s1[31], cospi_4_64);

  s1[21] = multiply_accumulate_shift_and_narrow_s16(s1[20], -cospi_20_64,
                                                    s1[27], cospi_12_64);
  s1[26] = multiply_accumulate_shift_and_narrow_s16(s1[20], cospi_12_64,
                                                    s1[27], cospi_20_64);

  s1[22] = multiply_accumulate_shift_and_narrow_s16(s1[23], -cospi_12_64,
                                                    s1[24], -cospi_20_64);
  s1[25] = multiply_accumulate_shift_and_narrow_s16(s1[23], -cospi_20_64,
                                                    s1[24], cospi_12_64);

  // stage 4
  s1[0] = multiply_shift_and_narrow_s16(in[0], cospi_16_64);

  s2[9] = multiply_accumulate_shift_and_narrow_s16(s2[8], -cospi_8_64,
                                                   s2[15], cospi_24_64);
  s2[14] = multiply_accumulate_shift_and_narrow_s16(s2[8], cospi_24_64,
                                                    s2[15], cospi_8_64);

  s2[20] = vsubq_s16(s1[23], s1[20]);
  s2[21] = vsubq_s16(s1[22], s1[21]);
  s2[22] = vaddq_s16(s1[21], s1[22]);
  s2[23] = vaddq_s16(s1[20], s1[23]);
  s2[24] = vaddq_s16(s1[24], s1[27]);
  s2[25] = vaddq_s16(s1[25], s1[26]);
  s2[26] = vsubq_s16(s1[25], s1[26]);
  s2[27] = vsubq_s16(s1[24], s1[27]);

  // stage 5
  s1[5] = sub_multiply_shift_and_narrow_s16(s1[7], s1[4], cospi_16_64);
  s1[6] = add_multiply_shift_and_narrow_s16(s1[4], s1[7], cospi_16_64);

  s1[18] = multiply_accumulate_shift_and_narrow_s16(s1[17], -cospi_8_64,
                                                    s1[30], cospi_24_64);
  s1[29] = multiply_accumulate_shift_and_narrow_s16(s1[17], cospi_24_64,
                                                    s1[30], cospi_8_64);

  s1[19] = multiply_accumulate_shift_and_narrow_s16(s1[16], -cospi_8_64,
                                                    s1[31], cospi_24_64);
  s1[28] = multiply_accumulate_shift_and_narrow_s16(s1[16], cospi_24_64,
                                                    s1[31], cospi_8_64);

  s1[20] = multiply_accumulate_shift_and_narrow_s16(s2[20], -cospi_24_64,
                                                    s2[27], -cospi_8_64);
  s1[27] = multiply_accumulate_shift_and_narrow_s16(s2[20], -cospi_8_64,
                                                    s2[27], cospi_24_64);

  s1[21] = multiply_accumulate_shift_and_narrow_s16(s2[21], -cospi_24_64,
                                                    s2[26], -cospi_8_64);
  s1[26] = multiply_accumulate_shift_and_narrow_s16(s2[21], -cospi_8_64,
                                                    s2[26], cospi_24_64);

  // stage 6
  s2[0] = vaddq_s16(s1[0], s1[7]);
  s2[1] = vaddq_s16(s1[0], s1[6]);
  s2[2] = vaddq_s16(s1[0], s1[5]);
  s2[3] = vaddq_s16(s1[0], s1[4]);
  s2[4] = vsubq_s16(s1[0], s1[4]);
  s2[5] = vsubq_s16(s1[0], s1[5]);
  s2[6] = vsubq_s16(s1[0], s1[6]);
  s2[7] = vsubq_s16(s1[0], s1[7]);

  s2[10] = sub_multiply_shift_and_narrow_s16(s2[14], s2[9], cospi_16_64);
  s2[13] = add_multiply_shift_and_narrow_s16(s2[9], s2[14], cospi_16_64);
  s2[11] = sub_multiply_shift_and_narrow_s16(s2[15], s2[8], cospi_16_64);
  s2[12] = add_multiply_shift_and_narrow_s16(s2[8], s2[15], cospi_16_64);

  s2[16] = vaddq_s16(s1[16], s2[23]);
  s2[17] = vaddq_s16(s1[17], s2[22]);
  s2[18] = vaddq_s16(s1[18], s1[21]);
  s2[19] = vaddq_s16(s1[19], s1[20]);
  s2[20] = vsubq_s16(s1[19], s1[20]);
  s2[21] = vsubq_s16(s1[18], s1[21]);
  s2[22] = vsubq_s16(s1[17], s2[22]);
  s2[23] = vsubq_s16(s1[16], s2[23]);

  s3[24] = vsubq_s16(s1[31], s2[24]);
  s3[25] = vsubq_s16(s1[30], s2[25]);
  s3[26] = vsubq_s16(s1[29], s1[26]);
  s3[27] = vsubq_s16(s1[28], s1[27]);
  s2[28] = vaddq_s16(s1[27], s1[28]);
  s2[29] = vaddq_s16(s1[26], s1[29]);
  s2[30] = vaddq_s16(s2[25], s1[30]);
  s2[31] = vaddq_s16(s2[24], s1[31]);

  // stage 7
  s1[0] = vaddq_s16(s2[0], s2[15]);
  s1[1] = vaddq_s16(s2[1], s2[14]);
  s1[2] = vaddq_s16(s2[2], s2[13]);
  s1[3] = vaddq_s16(s2[3], s2[12]);
  s1[4] = vaddq_s16(s2[4], s2[11]);
  s1[5] = vaddq_s16(s2[5], s2[10]);
  s1[6] = vaddq_s16(s2[6], s2[9]);
  s1[7] = vaddq_s16(s2[7], s2[8]);
  s1[8] = vsubq_s16(s2[7], s2[8]);
  s1[9] = vsubq_s16(s2[6], s2[9]);
  s1[10] = vsubq_s16(s2[5], s2[10]);
  s1[11] = vsubq_s16(s2[4], s2[11]);
  s1[12] = vsubq_s16(s2[3], s2[12]);
  s1[13] = vsubq_s16(s2[2], s2[13]);
  s1[14] = vsubq_s16(s2[1], s2[14]);
  s1[15] = vsubq_s16(s2[0], s2[15]);

  s1[20] = sub_multiply_shift_and_narrow_s16(s3[27], s2[20], cospi_16_64);
  s1[27] = add_multiply_shift_and_narrow_s16(s2[20], s3[27], cospi_16_64);

  s1[21] = sub_multiply_shift_and_narrow_s16(s3[26], s2[21], cospi_16_64);
  s1[26] = add_multiply_shift_and_narrow_s16(s2[21], s3[26], cospi_16_64);

  s1[22] = sub_multiply_shift_and_narrow_s16(s3[25], s2[22], cospi_16_64);
  s1[25] = add_multiply_shift_and_narrow_s16(s2[22], s3[25], cospi_16_64);

  s1[23] = sub_multiply_shift_and_narrow_s16(s3[24], s2[23], cospi_16_64);
  s1[24] = add_multiply_shift_and_narrow_s16(s2[23], s3[24], cospi_16_64);

  // final stage
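  // Results are stored sequentially, one 8-lane vector per output position,
  // so pass 2 can reload contiguous 8x8 blocks with a stride of 8.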
  vst1q_s16(output, vaddq_s16(s1[0], s2[31]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[1], s2[30]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[2], s2[29]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[3], s2[28]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[4], s1[27]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[5], s1[26]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[6], s1[25]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[7], s1[24]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[8], s1[23]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[9], s1[22]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[10], s1[21]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[11], s1[20]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[12], s2[19]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[13], s2[18]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[14], s2[17]));
  output += 8;
  vst1q_s16(output, vaddq_s16(s1[15], s2[16]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[15], s2[16]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[14], s2[17]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[13], s2[18]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[12], s2[19]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[11], s1[20]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[10], s1[21]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[9], s1[22]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[8], s1[23]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[7], s1[24]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[6], s1[25]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[5], s1[26]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[4], s1[27]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[3], s2[28]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[2], s2[29]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[1], s2[30]));
  output += 8;
  vst1q_s16(output, vsubq_s16(s1[0], s2[31]));
}
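
// Second pass. Each call consumes one 8x8 block of the pass 1 intermediate
// values (loaded with a stride of 8) and produces an 8-pixel-wide, 32-row
// band of the final result, which is added to the destination. highbd_flag
// selects the high-bitdepth store path.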
void vpx_idct32_8_neon(const int16_t *input, void *const output, int stride,
                       const int highbd_flag) {
  int16x8_t in[8], s1[32], s2[32], s3[32], out[32];

  load_and_transpose_s16_8x8(input, 8, &in[0], &in[1], &in[2], &in[3], &in[4],
                             &in[5], &in[6], &in[7]);

  // stage 1
  s1[16] = multiply_shift_and_narrow_s16(in[1], cospi_31_64);
  s1[31] = multiply_shift_and_narrow_s16(in[1], cospi_1_64);

  // Different for _8_: in[7] is non-zero in this pass.
  s1[19] = multiply_shift_and_narrow_s16(in[7], -cospi_25_64);
  s1[28] = multiply_shift_and_narrow_s16(in[7], cospi_7_64);

  s1[20] = multiply_shift_and_narrow_s16(in[5], cospi_27_64);
  s1[27] = multiply_shift_and_narrow_s16(in[5], cospi_5_64);

  s1[23] = multiply_shift_and_narrow_s16(in[3], -cospi_29_64);
  s1[24] = multiply_shift_and_narrow_s16(in[3], cospi_3_64);

  // stage 2
  s2[8] = multiply_shift_and_narrow_s16(in[2], cospi_30_64);
  s2[15] = multiply_shift_and_narrow_s16(in[2], cospi_2_64);

  s2[11] = multiply_shift_and_narrow_s16(in[6], -cospi_26_64);
  s2[12] = multiply_shift_and_narrow_s16(in[6], cospi_6_64);

  // stage 3
  s1[4] = multiply_shift_and_narrow_s16(in[4], cospi_28_64);
  s1[7] = multiply_shift_and_narrow_s16(in[4], cospi_4_64);

  s1[17] = multiply_accumulate_shift_and_narrow_s16(s1[16], -cospi_4_64,
                                                    s1[31], cospi_28_64);
  s1[30] = multiply_accumulate_shift_and_narrow_s16(s1[16], cospi_28_64,
                                                    s1[31], cospi_4_64);

  // Different for _8_: these terms depend on the in[7] products above.
  s1[18] = multiply_accumulate_shift_and_narrow_s16(s1[19], -cospi_28_64,
                                                    s1[28], -cospi_4_64);
  s1[29] = multiply_accumulate_shift_and_narrow_s16(s1[19], -cospi_4_64,
                                                    s1[28], cospi_28_64);

  s1[21] = multiply_accumulate_shift_and_narrow_s16(s1[20], -cospi_20_64,
                                                    s1[27], cospi_12_64);
  s1[26] = multiply_accumulate_shift_and_narrow_s16(s1[20], cospi_12_64,
                                                    s1[27], cospi_20_64);

  s1[22] = multiply_accumulate_shift_and_narrow_s16(s1[23], -cospi_12_64,
                                                    s1[24], -cospi_20_64);
  s1[25] = multiply_accumulate_shift_and_narrow_s16(s1[23], -cospi_20_64,
                                                    s1[24], cospi_12_64);

  // stage 4
  s1[0] = multiply_shift_and_narrow_s16(in[0], cospi_16_64);

  s2[9] = multiply_accumulate_shift_and_narrow_s16(s2[8], -cospi_8_64,
                                                   s2[15], cospi_24_64);
  s2[14] = multiply_accumulate_shift_and_narrow_s16(s2[8], cospi_24_64,
                                                    s2[15], cospi_8_64);

  s2[10] = multiply_accumulate_shift_and_narrow_s16(s2[11], -cospi_24_64,
                                                    s2[12], -cospi_8_64);
  s2[13] = multiply_accumulate_shift_and_narrow_s16(s2[11], -cospi_8_64,
                                                    s2[12], cospi_24_64);

  s2[16] = vaddq_s16(s1[16], s1[19]);
  s2[17] = vaddq_s16(s1[17], s1[18]);
  s2[18] = vsubq_s16(s1[17], s1[18]);
  s2[19] = vsubq_s16(s1[16], s1[19]);
  s2[20] = vsubq_s16(s1[23], s1[20]);
  s2[21] = vsubq_s16(s1[22], s1[21]);
  s2[22] = vaddq_s16(s1[21], s1[22]);
  s2[23] = vaddq_s16(s1[20], s1[23]);
  s2[24] = vaddq_s16(s1[24], s1[27]);
  s2[25] = vaddq_s16(s1[25], s1[26]);
  s2[26] = vsubq_s16(s1[25], s1[26]);
  s2[27] = vsubq_s16(s1[24], s1[27]);
  s2[28] = vsubq_s16(s1[31], s1[28]);
  s2[29] = vsubq_s16(s1[30], s1[29]);
  s2[30] = vaddq_s16(s1[29], s1[30]);
  s2[31] = vaddq_s16(s1[28], s1[31]);

  // stage 5
  s1[5] = sub_multiply_shift_and_narrow_s16(s1[7], s1[4], cospi_16_64);
  s1[6] = add_multiply_shift_and_narrow_s16(s1[4], s1[7], cospi_16_64);

  s1[8] = vaddq_s16(s2[8], s2[11]);
  s1[9] = vaddq_s16(s2[9], s2[10]);
  s1[10] = vsubq_s16(s2[9], s2[10]);
  s1[11] = vsubq_s16(s2[8], s2[11]);
  s1[12] = vsubq_s16(s2[15], s2[12]);
  s1[13] = vsubq_s16(s2[14], s2[13]);
  s1[14] = vaddq_s16(s2[13], s2[14]);
  s1[15] = vaddq_s16(s2[12], s2[15]);

  s1[18] = multiply_accumulate_shift_and_narrow_s16(s2[18], -cospi_8_64,
                                                    s2[29], cospi_24_64);
  s1[29] = multiply_accumulate_shift_and_narrow_s16(s2[18], cospi_24_64,
                                                    s2[29], cospi_8_64);

  s1[19] = multiply_accumulate_shift_and_narrow_s16(s2[19], -cospi_8_64,
                                                    s2[28], cospi_24_64);
  s1[28] = multiply_accumulate_shift_and_narrow_s16(s2[19], cospi_24_64,
                                                    s2[28], cospi_8_64);

  s1[20] = multiply_accumulate_shift_and_narrow_s16(s2[20], -cospi_24_64,
                                                    s2[27], -cospi_8_64);
  s1[27] = multiply_accumulate_shift_and_narrow_s16(s2[20], -cospi_8_64,
                                                    s2[27], cospi_24_64);

  s1[21] = multiply_accumulate_shift_and_narrow_s16(s2[21], -cospi_24_64,
                                                    s2[26], -cospi_8_64);
  s1[26] = multiply_accumulate_shift_and_narrow_s16(s2[21], -cospi_8_64,
                                                    s2[26], cospi_24_64);

  // stage 6
  s2[0] = vaddq_s16(s1[0], s1[7]);
  s2[1] = vaddq_s16(s1[0], s1[6]);
  s2[2] = vaddq_s16(s1[0], s1[5]);
  s2[3] = vaddq_s16(s1[0], s1[4]);
  s2[4] = vsubq_s16(s1[0], s1[4]);
  s2[5] = vsubq_s16(s1[0], s1[5]);
  s2[6] = vsubq_s16(s1[0], s1[6]);
  s2[7] = vsubq_s16(s1[0], s1[7]);

  s2[10] = sub_multiply_shift_and_narrow_s16(s1[13], s1[10], cospi_16_64);
  s2[13] = add_multiply_shift_and_narrow_s16(s1[10], s1[13], cospi_16_64);
  s2[11] = sub_multiply_shift_and_narrow_s16(s1[12], s1[11], cospi_16_64);
  s2[12] = add_multiply_shift_and_narrow_s16(s1[11], s1[12], cospi_16_64);

  s1[16] = vaddq_s16(s2[16], s2[23]);
  s1[17] = vaddq_s16(s2[17], s2[22]);
  s2[18] = vaddq_s16(s1[18], s1[21]);
  s2[19] = vaddq_s16(s1[19], s1[20]);
  s2[20] = vsubq_s16(s1[19], s1[20]);
  s2[21] = vsubq_s16(s1[18], s1[21]);
  s1[22] = vsubq_s16(s2[17], s2[22]);
  s1[23] = vsubq_s16(s2[16], s2[23]);

  s3[24] = vsubq_s16(s2[31], s2[24]);
  s3[25] = vsubq_s16(s2[30], s2[25]);
  s3[26] = vsubq_s16(s1[29], s1[26]);
  s3[27] = vsubq_s16(s1[28], s1[27]);
  s2[28] = vaddq_s16(s1[27], s1[28]);
  s2[29] = vaddq_s16(s1[26], s1[29]);
  s2[30] = vaddq_s16(s2[25], s2[30]);
  s2[31] = vaddq_s16(s2[24], s2[31]);

  // stage 7
  s1[0] = vaddq_s16(s2[0], s1[15]);
  s1[1] = vaddq_s16(s2[1], s1[14]);
  s1[2] = vaddq_s16(s2[2], s2[13]);
  s1[3] = vaddq_s16(s2[3], s2[12]);
  s1[4] = vaddq_s16(s2[4], s2[11]);
  s1[5] = vaddq_s16(s2[5], s2[10]);
  s1[6] = vaddq_s16(s2[6], s1[9]);
  s1[7] = vaddq_s16(s2[7], s1[8]);
  s1[8] = vsubq_s16(s2[7], s1[8]);
  s1[9] = vsubq_s16(s2[6], s1[9]);
  s1[10] = vsubq_s16(s2[5], s2[10]);
  s1[11] = vsubq_s16(s2[4], s2[11]);
  s1[12] = vsubq_s16(s2[3], s2[12]);
  s1[13] = vsubq_s16(s2[2], s2[13]);
  s1[14] = vsubq_s16(s2[1], s1[14]);
  s1[15] = vsubq_s16(s2[0], s1[15]);

  s1[20] = sub_multiply_shift_and_narrow_s16(s3[27], s2[20], cospi_16_64);
  s1[27] = add_multiply_shift_and_narrow_s16(s2[20], s3[27], cospi_16_64);

  s1[21] = sub_multiply_shift_and_narrow_s16(s3[26], s2[21], cospi_16_64);
  s1[26] = add_multiply_shift_and_narrow_s16(s2[21], s3[26], cospi_16_64);

  s2[22] = sub_multiply_shift_and_narrow_s16(s3[25], s1[22], cospi_16_64);
  s1[25] = add_multiply_shift_and_narrow_s16(s1[22], s3[25], cospi_16_64);

  s2[23] = sub_multiply_shift_and_narrow_s16(s3[24], s1[23], cospi_16_64);
  s1[24] = add_multiply_shift_and_narrow_s16(s1[23], s3[24], cospi_16_64);

  // final stage
  out[0] = final_add(s1[0], s2[31]);
  out[1] = final_add(s1[1], s2[30]);
  out[2] = final_add(s1[2], s2[29]);
  out[3] = final_add(s1[3], s2[28]);
  out[4] = final_add(s1[4], s1[27]);
  out[5] = final_add(s1[5], s1[26]);
  out[6] = final_add(s1[6], s1[25]);
  out[7] = final_add(s1[7], s1[24]);
  out[8] = final_add(s1[8], s2[23]);
  out[9] = final_add(s1[9], s2[22]);
  out[10] = final_add(s1[10], s1[21]);
  out[11] = final_add(s1[11], s1[20]);
  out[12] = final_add(s1[12], s2[19]);
  out[13] = final_add(s1[13], s2[18]);
  out[14] = final_add(s1[14], s1[17]);
  out[15] = final_add(s1[15], s1[16]);
  out[16] = final_sub(s1[15], s1[16]);
  out[17] = final_sub(s1[14], s1[17]);
  out[18] = final_sub(s1[13], s2[18]);
  out[19] = final_sub(s1[12], s2[19]);
  out[20] = final_sub(s1[11], s1[20]);
  out[21] = final_sub(s1[10], s1[21]);
  out[22] = final_sub(s1[9], s2[22]);
  out[23] = final_sub(s1[8], s2[23]);
  out[24] = final_sub(s1[7], s1[24]);
  out[25] = final_sub(s1[6], s1[25]);
  out[26] = final_sub(s1[5], s1[26]);
  out[27] = final_sub(s1[4], s1[27]);
  out[28] = final_sub(s1[3], s2[28]);
  out[29] = final_sub(s1[2], s2[29]);
  out[30] = final_sub(s1[1], s2[30]);
  out[31] = final_sub(s1[0], s2[31]);

  if (highbd_flag) {
    highbd_add_and_store_bd8(out, output, stride);
  } else {
    uint8_t *const outputT = (uint8_t *)output;
    add_and_store_u8_s16(out + 0, outputT, stride);
    add_and_store_u8_s16(out + 8, outputT + (8 * stride), stride);
    add_and_store_u8_s16(out + 16, outputT + (16 * stride), stride);
    add_and_store_u8_s16(out + 24, outputT + (24 * stride), stride);
  }
}
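
// The 34 non-zero coefficients all fall in the top left 8x8, so the first
// pass only needs to transform 8 rows; the second pass then covers the full
// 32x32 output in four 8-column bands.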
void vpx_idct32x32_34_add_neon(const tran_low_t *input, uint8_t *dest,
                               int stride) {
  int i;
  int16_t temp[32 * 8];
  int16_t *t = temp;

  vpx_idct32_6_neon(input, t);

  for (i = 0; i < 32; i += 8) {
    vpx_idct32_8_neon(t, dest, stride, 0);
    t += (8 * 8);
    dest += 8;
  }
}
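
// A minimal usage sketch (hypothetical buffer names; in libvpx this function
// is normally reached through the vpx_idct32x32_34_add() RTCD dispatch,
// chosen when the end-of-block count is at most 34):
//
//   tran_low_t coeffs[32 * 32];  // dequantized coefficients, <= 34 non-zero,
//                                // all within the top left 8x8
//   uint8_t recon[32 * 32];      // prediction pixels, reconstructed in place
//   vpx_idct32x32_34_add_neon(coeffs, recon, 32 /* stride */);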