fdct16x16_neon.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387
  1. /*
  2. * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
  10. #include <arm_neon.h>
  11. #include "./vpx_config.h"
  12. #include "./vpx_dsp_rtcd.h"
  13. #include "vpx_dsp/txfm_common.h"
  14. #include "vpx_dsp/arm/mem_neon.h"
  15. #include "vpx_dsp/arm/transpose_neon.h"
  16. // Some builds of gcc 4.9.2 and .3 have trouble with some of the inline
  17. // functions.
  18. #if !defined(__clang__) && !defined(__ANDROID__) && defined(__GNUC__) && \
  19. __GNUC__ == 4 && __GNUC_MINOR__ == 9 && __GNUC_PATCHLEVEL__ < 4
  20. void vpx_fdct16x16_neon(const int16_t *input, tran_low_t *output, int stride) {
  21. vpx_fdct16x16_c(input, output, stride);
  22. }
  23. #else
  24. static INLINE void load(const int16_t *a, int stride, int16x8_t *b /*[16]*/) {
  25. b[0] = vld1q_s16(a);
  26. a += stride;
  27. b[1] = vld1q_s16(a);
  28. a += stride;
  29. b[2] = vld1q_s16(a);
  30. a += stride;
  31. b[3] = vld1q_s16(a);
  32. a += stride;
  33. b[4] = vld1q_s16(a);
  34. a += stride;
  35. b[5] = vld1q_s16(a);
  36. a += stride;
  37. b[6] = vld1q_s16(a);
  38. a += stride;
  39. b[7] = vld1q_s16(a);
  40. a += stride;
  41. b[8] = vld1q_s16(a);
  42. a += stride;
  43. b[9] = vld1q_s16(a);
  44. a += stride;
  45. b[10] = vld1q_s16(a);
  46. a += stride;
  47. b[11] = vld1q_s16(a);
  48. a += stride;
  49. b[12] = vld1q_s16(a);
  50. a += stride;
  51. b[13] = vld1q_s16(a);
  52. a += stride;
  53. b[14] = vld1q_s16(a);
  54. a += stride;
  55. b[15] = vld1q_s16(a);
  56. }
  57. // Store 8 16x8 values, assuming stride == 16.
  58. static INLINE void store(tran_low_t *a, const int16x8_t *b /*[8]*/) {
  59. store_s16q_to_tran_low(a, b[0]);
  60. a += 16;
  61. store_s16q_to_tran_low(a, b[1]);
  62. a += 16;
  63. store_s16q_to_tran_low(a, b[2]);
  64. a += 16;
  65. store_s16q_to_tran_low(a, b[3]);
  66. a += 16;
  67. store_s16q_to_tran_low(a, b[4]);
  68. a += 16;
  69. store_s16q_to_tran_low(a, b[5]);
  70. a += 16;
  71. store_s16q_to_tran_low(a, b[6]);
  72. a += 16;
  73. store_s16q_to_tran_low(a, b[7]);
  74. }
  75. // Load step of each pass. Add and subtract clear across the input, requiring
  76. // all 16 values to be loaded. For the first pass it also multiplies by 4.
  77. // To maybe reduce register usage this could be combined with the load() step to
  78. // get the first 4 and last 4 values, cross those, then load the middle 8 values
  79. // and cross them.
  80. static INLINE void cross_input(const int16x8_t *a /*[16]*/,
  81. int16x8_t *b /*[16]*/, const int pass) {
  82. if (pass == 0) {
  83. b[0] = vshlq_n_s16(vaddq_s16(a[0], a[15]), 2);
  84. b[1] = vshlq_n_s16(vaddq_s16(a[1], a[14]), 2);
  85. b[2] = vshlq_n_s16(vaddq_s16(a[2], a[13]), 2);
  86. b[3] = vshlq_n_s16(vaddq_s16(a[3], a[12]), 2);
  87. b[4] = vshlq_n_s16(vaddq_s16(a[4], a[11]), 2);
  88. b[5] = vshlq_n_s16(vaddq_s16(a[5], a[10]), 2);
  89. b[6] = vshlq_n_s16(vaddq_s16(a[6], a[9]), 2);
  90. b[7] = vshlq_n_s16(vaddq_s16(a[7], a[8]), 2);
  91. b[8] = vshlq_n_s16(vsubq_s16(a[7], a[8]), 2);
  92. b[9] = vshlq_n_s16(vsubq_s16(a[6], a[9]), 2);
  93. b[10] = vshlq_n_s16(vsubq_s16(a[5], a[10]), 2);
  94. b[11] = vshlq_n_s16(vsubq_s16(a[4], a[11]), 2);
  95. b[12] = vshlq_n_s16(vsubq_s16(a[3], a[12]), 2);
  96. b[13] = vshlq_n_s16(vsubq_s16(a[2], a[13]), 2);
  97. b[14] = vshlq_n_s16(vsubq_s16(a[1], a[14]), 2);
  98. b[15] = vshlq_n_s16(vsubq_s16(a[0], a[15]), 2);
  99. } else {
  100. b[0] = vaddq_s16(a[0], a[15]);
  101. b[1] = vaddq_s16(a[1], a[14]);
  102. b[2] = vaddq_s16(a[2], a[13]);
  103. b[3] = vaddq_s16(a[3], a[12]);
  104. b[4] = vaddq_s16(a[4], a[11]);
  105. b[5] = vaddq_s16(a[5], a[10]);
  106. b[6] = vaddq_s16(a[6], a[9]);
  107. b[7] = vaddq_s16(a[7], a[8]);
  108. b[8] = vsubq_s16(a[7], a[8]);
  109. b[9] = vsubq_s16(a[6], a[9]);
  110. b[10] = vsubq_s16(a[5], a[10]);
  111. b[11] = vsubq_s16(a[4], a[11]);
  112. b[12] = vsubq_s16(a[3], a[12]);
  113. b[13] = vsubq_s16(a[2], a[13]);
  114. b[14] = vsubq_s16(a[1], a[14]);
  115. b[15] = vsubq_s16(a[0], a[15]);
  116. }
  117. }
  118. // Quarter round at the beginning of the second pass. Can't use vrshr (rounding)
  119. // because this only adds 1, not 1 << 2.
  120. static INLINE void partial_round_shift(int16x8_t *a /*[16]*/) {
  121. const int16x8_t one = vdupq_n_s16(1);
  122. a[0] = vshrq_n_s16(vaddq_s16(a[0], one), 2);
  123. a[1] = vshrq_n_s16(vaddq_s16(a[1], one), 2);
  124. a[2] = vshrq_n_s16(vaddq_s16(a[2], one), 2);
  125. a[3] = vshrq_n_s16(vaddq_s16(a[3], one), 2);
  126. a[4] = vshrq_n_s16(vaddq_s16(a[4], one), 2);
  127. a[5] = vshrq_n_s16(vaddq_s16(a[5], one), 2);
  128. a[6] = vshrq_n_s16(vaddq_s16(a[6], one), 2);
  129. a[7] = vshrq_n_s16(vaddq_s16(a[7], one), 2);
  130. a[8] = vshrq_n_s16(vaddq_s16(a[8], one), 2);
  131. a[9] = vshrq_n_s16(vaddq_s16(a[9], one), 2);
  132. a[10] = vshrq_n_s16(vaddq_s16(a[10], one), 2);
  133. a[11] = vshrq_n_s16(vaddq_s16(a[11], one), 2);
  134. a[12] = vshrq_n_s16(vaddq_s16(a[12], one), 2);
  135. a[13] = vshrq_n_s16(vaddq_s16(a[13], one), 2);
  136. a[14] = vshrq_n_s16(vaddq_s16(a[14], one), 2);
  137. a[15] = vshrq_n_s16(vaddq_s16(a[15], one), 2);
  138. }
  139. // fdct_round_shift((a +/- b) * c)
  140. static INLINE void butterfly_one_coeff(const int16x8_t a, const int16x8_t b,
  141. const tran_high_t c, int16x8_t *add,
  142. int16x8_t *sub) {
  143. const int32x4_t a0 = vmull_n_s16(vget_low_s16(a), c);
  144. const int32x4_t a1 = vmull_n_s16(vget_high_s16(a), c);
  145. const int32x4_t sum0 = vmlal_n_s16(a0, vget_low_s16(b), c);
  146. const int32x4_t sum1 = vmlal_n_s16(a1, vget_high_s16(b), c);
  147. const int32x4_t diff0 = vmlsl_n_s16(a0, vget_low_s16(b), c);
  148. const int32x4_t diff1 = vmlsl_n_s16(a1, vget_high_s16(b), c);
  149. const int16x4_t rounded0 = vqrshrn_n_s32(sum0, 14);
  150. const int16x4_t rounded1 = vqrshrn_n_s32(sum1, 14);
  151. const int16x4_t rounded2 = vqrshrn_n_s32(diff0, 14);
  152. const int16x4_t rounded3 = vqrshrn_n_s32(diff1, 14);
  153. *add = vcombine_s16(rounded0, rounded1);
  154. *sub = vcombine_s16(rounded2, rounded3);
  155. }
  156. // fdct_round_shift(a * c0 +/- b * c1)
  157. static INLINE void butterfly_two_coeff(const int16x8_t a, const int16x8_t b,
  158. const tran_coef_t c0,
  159. const tran_coef_t c1, int16x8_t *add,
  160. int16x8_t *sub) {
  161. const int32x4_t a0 = vmull_n_s16(vget_low_s16(a), c0);
  162. const int32x4_t a1 = vmull_n_s16(vget_high_s16(a), c0);
  163. const int32x4_t a2 = vmull_n_s16(vget_low_s16(a), c1);
  164. const int32x4_t a3 = vmull_n_s16(vget_high_s16(a), c1);
  165. const int32x4_t sum0 = vmlal_n_s16(a2, vget_low_s16(b), c0);
  166. const int32x4_t sum1 = vmlal_n_s16(a3, vget_high_s16(b), c0);
  167. const int32x4_t diff0 = vmlsl_n_s16(a0, vget_low_s16(b), c1);
  168. const int32x4_t diff1 = vmlsl_n_s16(a1, vget_high_s16(b), c1);
  169. const int16x4_t rounded0 = vqrshrn_n_s32(sum0, 14);
  170. const int16x4_t rounded1 = vqrshrn_n_s32(sum1, 14);
  171. const int16x4_t rounded2 = vqrshrn_n_s32(diff0, 14);
  172. const int16x4_t rounded3 = vqrshrn_n_s32(diff1, 14);
  173. *add = vcombine_s16(rounded0, rounded1);
  174. *sub = vcombine_s16(rounded2, rounded3);
  175. }
  176. // Transpose 8x8 to a new location. Don't use transpose_neon.h because those
  177. // are all in-place.
  178. static INLINE void transpose_8x8(const int16x8_t *a /*[8]*/,
  179. int16x8_t *b /*[8]*/) {
  180. // Swap 16 bit elements.
  181. const int16x8x2_t c0 = vtrnq_s16(a[0], a[1]);
  182. const int16x8x2_t c1 = vtrnq_s16(a[2], a[3]);
  183. const int16x8x2_t c2 = vtrnq_s16(a[4], a[5]);
  184. const int16x8x2_t c3 = vtrnq_s16(a[6], a[7]);
  185. // Swap 32 bit elements.
  186. const int32x4x2_t d0 = vtrnq_s32(vreinterpretq_s32_s16(c0.val[0]),
  187. vreinterpretq_s32_s16(c1.val[0]));
  188. const int32x4x2_t d1 = vtrnq_s32(vreinterpretq_s32_s16(c0.val[1]),
  189. vreinterpretq_s32_s16(c1.val[1]));
  190. const int32x4x2_t d2 = vtrnq_s32(vreinterpretq_s32_s16(c2.val[0]),
  191. vreinterpretq_s32_s16(c3.val[0]));
  192. const int32x4x2_t d3 = vtrnq_s32(vreinterpretq_s32_s16(c2.val[1]),
  193. vreinterpretq_s32_s16(c3.val[1]));
  194. // Swap 64 bit elements
  195. const int16x8x2_t e0 = vpx_vtrnq_s64_to_s16(d0.val[0], d2.val[0]);
  196. const int16x8x2_t e1 = vpx_vtrnq_s64_to_s16(d1.val[0], d3.val[0]);
  197. const int16x8x2_t e2 = vpx_vtrnq_s64_to_s16(d0.val[1], d2.val[1]);
  198. const int16x8x2_t e3 = vpx_vtrnq_s64_to_s16(d1.val[1], d3.val[1]);
  199. b[0] = e0.val[0];
  200. b[1] = e1.val[0];
  201. b[2] = e2.val[0];
  202. b[3] = e3.val[0];
  203. b[4] = e0.val[1];
  204. b[5] = e1.val[1];
  205. b[6] = e2.val[1];
  206. b[7] = e3.val[1];
  207. }
  208. // Main body of fdct16x16.
  209. static void dct_body(const int16x8_t *in /*[16]*/, int16x8_t *out /*[16]*/) {
  210. int16x8_t s[8];
  211. int16x8_t x[4];
  212. int16x8_t step[8];
  213. // stage 1
  214. // From fwd_txfm.c: Work on the first eight values; fdct8(input,
  215. // even_results);"
  216. s[0] = vaddq_s16(in[0], in[7]);
  217. s[1] = vaddq_s16(in[1], in[6]);
  218. s[2] = vaddq_s16(in[2], in[5]);
  219. s[3] = vaddq_s16(in[3], in[4]);
  220. s[4] = vsubq_s16(in[3], in[4]);
  221. s[5] = vsubq_s16(in[2], in[5]);
  222. s[6] = vsubq_s16(in[1], in[6]);
  223. s[7] = vsubq_s16(in[0], in[7]);
  224. // fdct4(step, step);
  225. x[0] = vaddq_s16(s[0], s[3]);
  226. x[1] = vaddq_s16(s[1], s[2]);
  227. x[2] = vsubq_s16(s[1], s[2]);
  228. x[3] = vsubq_s16(s[0], s[3]);
  229. // out[0] = fdct_round_shift((x0 + x1) * cospi_16_64)
  230. // out[8] = fdct_round_shift((x0 - x1) * cospi_16_64)
  231. butterfly_one_coeff(x[0], x[1], cospi_16_64, &out[0], &out[8]);
  232. // out[4] = fdct_round_shift(x3 * cospi_8_64 + x2 * cospi_24_64);
  233. // out[12] = fdct_round_shift(x3 * cospi_24_64 - x2 * cospi_8_64);
  234. butterfly_two_coeff(x[3], x[2], cospi_24_64, cospi_8_64, &out[4], &out[12]);
  235. // Stage 2
  236. // Re-using source s5/s6
  237. // s5 = fdct_round_shift((s6 - s5) * cospi_16_64)
  238. // s6 = fdct_round_shift((s6 + s5) * cospi_16_64)
  239. butterfly_one_coeff(s[6], s[5], cospi_16_64, &s[6], &s[5]);
  240. // Stage 3
  241. x[0] = vaddq_s16(s[4], s[5]);
  242. x[1] = vsubq_s16(s[4], s[5]);
  243. x[2] = vsubq_s16(s[7], s[6]);
  244. x[3] = vaddq_s16(s[7], s[6]);
  245. // Stage 4
  246. // out[2] = fdct_round_shift(x0 * cospi_28_64 + x3 * cospi_4_64)
  247. // out[14] = fdct_round_shift(x3 * cospi_28_64 + x0 * -cospi_4_64)
  248. butterfly_two_coeff(x[3], x[0], cospi_28_64, cospi_4_64, &out[2], &out[14]);
  249. // out[6] = fdct_round_shift(x1 * cospi_12_64 + x2 * cospi_20_64)
  250. // out[10] = fdct_round_shift(x2 * cospi_12_64 + x1 * -cospi_20_64)
  251. butterfly_two_coeff(x[2], x[1], cospi_12_64, cospi_20_64, &out[10], &out[6]);
  252. // step 2
  253. // From fwd_txfm.c: Work on the next eight values; step1 -> odd_results"
  254. // That file distinguished between "in_high" and "step1" but the only
  255. // difference is that "in_high" is the first 8 values and "step 1" is the
  256. // second. Here, since they are all in one array, "step1" values are += 8.
  257. // step2[2] = fdct_round_shift((step1[5] - step1[2]) * cospi_16_64)
  258. // step2[3] = fdct_round_shift((step1[4] - step1[3]) * cospi_16_64)
  259. // step2[4] = fdct_round_shift((step1[4] + step1[3]) * cospi_16_64)
  260. // step2[5] = fdct_round_shift((step1[5] + step1[2]) * cospi_16_64)
  261. butterfly_one_coeff(in[13], in[10], cospi_16_64, &s[5], &s[2]);
  262. butterfly_one_coeff(in[12], in[11], cospi_16_64, &s[4], &s[3]);
  263. // step 3
  264. s[0] = vaddq_s16(in[8], s[3]);
  265. s[1] = vaddq_s16(in[9], s[2]);
  266. x[0] = vsubq_s16(in[9], s[2]);
  267. x[1] = vsubq_s16(in[8], s[3]);
  268. x[2] = vsubq_s16(in[15], s[4]);
  269. x[3] = vsubq_s16(in[14], s[5]);
  270. s[6] = vaddq_s16(in[14], s[5]);
  271. s[7] = vaddq_s16(in[15], s[4]);
  272. // step 4
  273. // step2[1] = fdct_round_shift(step3[1] *-cospi_8_64 + step3[6] * cospi_24_64)
  274. // step2[6] = fdct_round_shift(step3[1] * cospi_24_64 + step3[6] * cospi_8_64)
  275. butterfly_two_coeff(s[6], s[1], cospi_24_64, cospi_8_64, &s[6], &s[1]);
  276. // step2[2] = fdct_round_shift(step3[2] * cospi_24_64 + step3[5] * cospi_8_64)
  277. // step2[5] = fdct_round_shift(step3[2] * cospi_8_64 - step3[5] * cospi_24_64)
  278. butterfly_two_coeff(x[0], x[3], cospi_8_64, cospi_24_64, &s[2], &s[5]);
  279. // step 5
  280. step[0] = vaddq_s16(s[0], s[1]);
  281. step[1] = vsubq_s16(s[0], s[1]);
  282. step[2] = vaddq_s16(x[1], s[2]);
  283. step[3] = vsubq_s16(x[1], s[2]);
  284. step[4] = vsubq_s16(x[2], s[5]);
  285. step[5] = vaddq_s16(x[2], s[5]);
  286. step[6] = vsubq_s16(s[7], s[6]);
  287. step[7] = vaddq_s16(s[7], s[6]);
  288. // step 6
  289. // out[1] = fdct_round_shift(step1[0] * cospi_30_64 + step1[7] * cospi_2_64)
  290. // out[9] = fdct_round_shift(step1[1] * cospi_14_64 + step1[6] * cospi_18_64)
  291. // out[5] = fdct_round_shift(step1[2] * cospi_22_64 + step1[5] * cospi_10_64)
  292. // out[13] = fdct_round_shift(step1[3] * cospi_6_64 + step1[4] * cospi_26_64)
  293. // out[3] = fdct_round_shift(step1[3] * -cospi_26_64 + step1[4] * cospi_6_64)
  294. // out[11] = fdct_round_shift(step1[2] * -cospi_10_64 + step1[5] *
  295. // cospi_22_64)
  296. // out[7] = fdct_round_shift(step1[1] * -cospi_18_64 + step1[6] * cospi_14_64)
  297. // out[15] = fdct_round_shift(step1[0] * -cospi_2_64 + step1[7] * cospi_30_64)
  298. butterfly_two_coeff(step[6], step[1], cospi_14_64, cospi_18_64, &out[9],
  299. &out[7]);
  300. butterfly_two_coeff(step[7], step[0], cospi_30_64, cospi_2_64, &out[1],
  301. &out[15]);
  302. butterfly_two_coeff(step[4], step[3], cospi_6_64, cospi_26_64, &out[13],
  303. &out[3]);
  304. butterfly_two_coeff(step[5], step[2], cospi_22_64, cospi_10_64, &out[5],
  305. &out[11]);
  306. }
  307. void vpx_fdct16x16_neon(const int16_t *input, tran_low_t *output, int stride) {
  308. int16x8_t temp0[16];
  309. int16x8_t temp1[16];
  310. int16x8_t temp2[16];
  311. int16x8_t temp3[16];
  312. // Left half.
  313. load(input, stride, temp0);
  314. cross_input(temp0, temp1, 0);
  315. dct_body(temp1, temp0);
  316. // Right half.
  317. load(input + 8, stride, temp1);
  318. cross_input(temp1, temp2, 0);
  319. dct_body(temp2, temp1);
  320. // Transpose top left and top right quarters into one contiguous location to
  321. // process to the top half.
  322. transpose_8x8(&temp0[0], &temp2[0]);
  323. transpose_8x8(&temp1[0], &temp2[8]);
  324. partial_round_shift(temp2);
  325. cross_input(temp2, temp3, 1);
  326. dct_body(temp3, temp2);
  327. transpose_s16_8x8(&temp2[0], &temp2[1], &temp2[2], &temp2[3], &temp2[4],
  328. &temp2[5], &temp2[6], &temp2[7]);
  329. transpose_s16_8x8(&temp2[8], &temp2[9], &temp2[10], &temp2[11], &temp2[12],
  330. &temp2[13], &temp2[14], &temp2[15]);
  331. store(output, temp2);
  332. store(output + 8, temp2 + 8);
  333. output += 8 * 16;
  334. // Transpose bottom left and bottom right quarters into one contiguous
  335. // location to process to the bottom half.
  336. transpose_8x8(&temp0[8], &temp1[0]);
  337. transpose_s16_8x8(&temp1[8], &temp1[9], &temp1[10], &temp1[11], &temp1[12],
  338. &temp1[13], &temp1[14], &temp1[15]);
  339. partial_round_shift(temp1);
  340. cross_input(temp1, temp0, 1);
  341. dct_body(temp0, temp1);
  342. transpose_s16_8x8(&temp1[0], &temp1[1], &temp1[2], &temp1[3], &temp1[4],
  343. &temp1[5], &temp1[6], &temp1[7]);
  344. transpose_s16_8x8(&temp1[8], &temp1[9], &temp1[10], &temp1[11], &temp1[12],
  345. &temp1[13], &temp1[14], &temp1[15]);
  346. store(output, temp1);
  347. store(output + 8, temp1 + 8);
  348. }
  349. #endif // !defined(__clang__) && !defined(__ANDROID__) && defined(__GNUC__) &&
  350. // __GNUC__ == 4 && __GNUC_MINOR__ == 9 && __GNUC_PATCHLEVEL__ < 4