shortfdct_neon.c

/*
 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
#include "./vp8_rtcd.h"
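
// vp8_short_fdct4x4_neon(): 4x4 forward DCT of a block of residuals, the
// NEON counterpart of the scalar vp8_short_fdct4x4_c reference.  It runs a
// row pass ("Part one") followed by a column pass ("Part two"), with rows
// and columns brought into vector lanes by 4x4 transposes.  pitch is the
// input stride in bytes; it is halved below so the loads step in int16_t
// elements.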
void vp8_short_fdct4x4_neon(int16_t *input, int16_t *output, int pitch) {
  int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
  int16x4_t d16s16, d17s16, d26s16, dEmptys16;
  uint16x4_t d4u16;
  int16x8_t q0s16, q1s16;
  int32x4_t q9s32, q10s32, q11s32, q12s32;
  int16x4x2_t v2tmp0, v2tmp1;
  int32x2x2_t v2tmp2, v2tmp3;
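
  // Fixed-point DCT multipliers and rounding biases shared with the scalar
  // reference: 5352 and 2217 are roughly 4096 * sqrt(2) * cos(pi/8) and
  // 4096 * sqrt(2) * sin(pi/8); 14500/7500 round the first pass (>> 12) and
  // 12000/51000 round the second pass (>> 16).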
  d16s16 = vdup_n_s16(5352);
  d17s16 = vdup_n_s16(2217);
  q9s32 = vdupq_n_s32(14500);
  q10s32 = vdupq_n_s32(7500);
  q11s32 = vdupq_n_s32(12000);
  q12s32 = vdupq_n_s32(51000);

  // Part one
  pitch >>= 1;
  d0s16 = vld1_s16(input);
  input += pitch;
  d1s16 = vld1_s16(input);
  input += pitch;
  d2s16 = vld1_s16(input);
  input += pitch;
  d3s16 = vld1_s16(input);
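
  // Transpose the 4x4 block: two 32-bit and two 16-bit vtrn steps leave each
  // column of the block in one d register (one lane per input row), so the
  // row transform below becomes plain per-lane arithmetic.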
  v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d0s16), vreinterpret_s32_s16(d2s16));
  v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16), vreinterpret_s32_s16(d3s16));
  v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]),   // d0
                    vreinterpret_s16_s32(v2tmp3.val[0]));  // d1
  v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]),   // d2
                    vreinterpret_s16_s32(v2tmp3.val[1]));  // d3
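
  // Row butterfly, per lane: a1 = x0 + x3, b1 = x1 + x2, c1 = x1 - x2,
  // d1 = x0 - x3 (x0..x3 being the row's four samples), then everything is
  // scaled by 8 (<< 3) as in the scalar first pass.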
  d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[1]);
  d5s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[0]);
  d6s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[0]);
  d7s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[1]);

  d4s16 = vshl_n_s16(d4s16, 3);
  d5s16 = vshl_n_s16(d5s16, 3);
  d6s16 = vshl_n_s16(d6s16, 3);
  d7s16 = vshl_n_s16(d7s16, 3);
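
  // First-pass outputs: d0 = a1 + b1, d2 = a1 - b1, and the two odd terms
  // (c1 * 2217 + d1 * 5352 + 14500) >> 12 and
  // (d1 * 2217 - c1 * 5352 + 7500) >> 12, computed with widening
  // multiply-accumulates plus a narrowing shift.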
  d0s16 = vadd_s16(d4s16, d5s16);
  d2s16 = vsub_s16(d4s16, d5s16);

  q9s32 = vmlal_s16(q9s32, d7s16, d16s16);
  q10s32 = vmlal_s16(q10s32, d7s16, d17s16);
  q9s32 = vmlal_s16(q9s32, d6s16, d17s16);
  q10s32 = vmlsl_s16(q10s32, d6s16, d16s16);

  d1s16 = vshrn_n_s32(q9s32, 12);
  d3s16 = vshrn_n_s32(q10s32, 12);

  // Part two
  v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d0s16), vreinterpret_s32_s16(d2s16));
  v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16), vreinterpret_s32_s16(d3s16));
  v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]),   // d0
                    vreinterpret_s16_s32(v2tmp3.val[0]));  // d1
  v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]),   // d2
                    vreinterpret_s16_s32(v2tmp3.val[1]));  // d3
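
  // Column pass: the same butterfly, now with one lane per column.  The even
  // outputs pick up the scalar reference's + 7 bias and >> 4; the odd outputs
  // use the 12000/51000 biases and a >> 16 narrowing shift.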
  d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[1]);
  d5s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[0]);
  d6s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[0]);
  d7s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[1]);

  d26s16 = vdup_n_s16(7);
  d4s16 = vadd_s16(d4s16, d26s16);

  d0s16 = vadd_s16(d4s16, d5s16);
  d2s16 = vsub_s16(d4s16, d5s16);

  q11s32 = vmlal_s16(q11s32, d7s16, d16s16);
  q12s32 = vmlal_s16(q12s32, d7s16, d17s16);
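
  // Build a mask that is -1 wherever d1 (d7s16) is nonzero; subtracting it
  // from d1s16 below adds 1 to those lanes of the op[4..7] output row, which
  // is the "+= (d1 != 0)" adjustment of the scalar reference.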
  dEmptys16 = vdup_n_s16(0);
  d4u16 = vceq_s16(d7s16, dEmptys16);

  d0s16 = vshr_n_s16(d0s16, 4);
  d2s16 = vshr_n_s16(d2s16, 4);

  q11s32 = vmlal_s16(q11s32, d6s16, d17s16);
  q12s32 = vmlsl_s16(q12s32, d6s16, d16s16);

  d4u16 = vmvn_u16(d4u16);
  d1s16 = vshrn_n_s32(q11s32, 16);
  d1s16 = vsub_s16(d1s16, vreinterpret_s16_u16(d4u16));
  d3s16 = vshrn_n_s32(q12s32, 16);

  q0s16 = vcombine_s16(d0s16, d1s16);
  q1s16 = vcombine_s16(d2s16, d3s16);

  vst1q_s16(output, q0s16);
  vst1q_s16(output + 8, q1s16);
  return;
}
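
// vp8_short_fdct8x4_neon(): applies the same 4x4 forward DCT to two
// horizontally adjacent 4x4 blocks at once, using full q registers.  After
// the transposes, the low half of each vector carries the left block and the
// high half the right block; the left block's coefficients are stored at
// output[0..15] and the right block's at output[16..31], matching a pair of
// 4x4 transforms on input and input + 4.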
void vp8_short_fdct8x4_neon(int16_t *input, int16_t *output, int pitch) {
  int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
  int16x4_t d16s16, d17s16, d26s16, d27s16, d28s16, d29s16;
  uint16x4_t d28u16, d29u16;
  uint16x8_t q14u16;
  int16x8_t q0s16, q1s16, q2s16, q3s16;
  int16x8_t q11s16, q12s16, q13s16, q14s16, q15s16, qEmptys16;
  int32x4_t q9s32, q10s32, q11s32, q12s32;
  int16x8x2_t v2tmp0, v2tmp1;
  int32x4x2_t v2tmp2, v2tmp3;
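
  // Same DCT multipliers (5352/2217) and first-pass rounding biases
  // (14500/7500) as the 4x4 version; the second-pass biases 12000/51000 are
  // loaded at the start of Part two.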
  d16s16 = vdup_n_s16(5352);
  d17s16 = vdup_n_s16(2217);
  q9s32 = vdupq_n_s32(14500);
  q10s32 = vdupq_n_s32(7500);

  // Part one
  pitch >>= 1;
  q0s16 = vld1q_s16(input);
  input += pitch;
  q1s16 = vld1q_s16(input);
  input += pitch;
  q2s16 = vld1q_s16(input);
  input += pitch;
  q3s16 = vld1q_s16(input);
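
  // Transpose both 4x4 blocks at once: after the 32-bit and 16-bit vtrnq
  // steps, each q register holds one column of the left block in its low
  // half and the matching column of the right block in its high half.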
  v2tmp2 =
      vtrnq_s32(vreinterpretq_s32_s16(q0s16), vreinterpretq_s32_s16(q2s16));
  v2tmp3 =
      vtrnq_s32(vreinterpretq_s32_s16(q1s16), vreinterpretq_s32_s16(q3s16));
  v2tmp0 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[0]),   // q0
                     vreinterpretq_s16_s32(v2tmp3.val[0]));  // q1
  v2tmp1 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[1]),   // q2
                     vreinterpretq_s16_s32(v2tmp3.val[1]));  // q3
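
  // Row butterfly for both blocks (one lane per row): a1, b1, c1, d1 as in
  // the 4x4 transform, scaled by 8.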
  q11s16 = vaddq_s16(v2tmp0.val[0], v2tmp1.val[1]);
  q12s16 = vaddq_s16(v2tmp0.val[1], v2tmp1.val[0]);
  q13s16 = vsubq_s16(v2tmp0.val[1], v2tmp1.val[0]);
  q14s16 = vsubq_s16(v2tmp0.val[0], v2tmp1.val[1]);

  q11s16 = vshlq_n_s16(q11s16, 3);
  q12s16 = vshlq_n_s16(q12s16, 3);
  q13s16 = vshlq_n_s16(q13s16, 3);
  q14s16 = vshlq_n_s16(q14s16, 3);

  q0s16 = vaddq_s16(q11s16, q12s16);
  q2s16 = vsubq_s16(q11s16, q12s16);
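
  // The widening multiply-accumulates take 64-bit inputs, so duplicate the
  // rounding accumulators and split c1 (q13) and d1 (q14) into low/high
  // halves: q9/q10 accumulate the left block's odd outputs, q11/q12 the
  // right block's.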
  q11s32 = q9s32;
  q12s32 = q10s32;

  d26s16 = vget_low_s16(q13s16);
  d27s16 = vget_high_s16(q13s16);
  d28s16 = vget_low_s16(q14s16);
  d29s16 = vget_high_s16(q14s16);

  q9s32 = vmlal_s16(q9s32, d28s16, d16s16);
  q10s32 = vmlal_s16(q10s32, d28s16, d17s16);
  q11s32 = vmlal_s16(q11s32, d29s16, d16s16);
  q12s32 = vmlal_s16(q12s32, d29s16, d17s16);

  q9s32 = vmlal_s16(q9s32, d26s16, d17s16);
  q10s32 = vmlsl_s16(q10s32, d26s16, d16s16);
  q11s32 = vmlal_s16(q11s32, d27s16, d17s16);
  q12s32 = vmlsl_s16(q12s32, d27s16, d16s16);

  d2s16 = vshrn_n_s32(q9s32, 12);
  d6s16 = vshrn_n_s32(q10s32, 12);
  d3s16 = vshrn_n_s32(q11s32, 12);
  d7s16 = vshrn_n_s32(q12s32, 12);

  q1s16 = vcombine_s16(d2s16, d3s16);
  q3s16 = vcombine_s16(d6s16, d7s16);

  // Part two
  q9s32 = vdupq_n_s32(12000);
  q10s32 = vdupq_n_s32(51000);

  v2tmp2 =
      vtrnq_s32(vreinterpretq_s32_s16(q0s16), vreinterpretq_s32_s16(q2s16));
  v2tmp3 =
      vtrnq_s32(vreinterpretq_s32_s16(q1s16), vreinterpretq_s32_s16(q3s16));
  v2tmp0 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[0]),   // q0
                     vreinterpretq_s16_s32(v2tmp3.val[0]));  // q1
  v2tmp1 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[1]),   // q2
                     vreinterpretq_s16_s32(v2tmp3.val[1]));  // q3
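
  // Column pass for both blocks: the even outputs take the + 7 bias and
  // >> 4, the odd outputs the 12000/51000 biases and a >> 16 narrowing
  // shift, just as in the 4x4 transform.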
  q11s16 = vaddq_s16(v2tmp0.val[0], v2tmp1.val[1]);
  q12s16 = vaddq_s16(v2tmp0.val[1], v2tmp1.val[0]);
  q13s16 = vsubq_s16(v2tmp0.val[1], v2tmp1.val[0]);
  q14s16 = vsubq_s16(v2tmp0.val[0], v2tmp1.val[1]);

  q15s16 = vdupq_n_s16(7);
  q11s16 = vaddq_s16(q11s16, q15s16);

  q0s16 = vaddq_s16(q11s16, q12s16);
  q1s16 = vsubq_s16(q11s16, q12s16);

  q11s32 = q9s32;
  q12s32 = q10s32;

  d0s16 = vget_low_s16(q0s16);
  d1s16 = vget_high_s16(q0s16);
  d2s16 = vget_low_s16(q1s16);
  d3s16 = vget_high_s16(q1s16);

  d0s16 = vshr_n_s16(d0s16, 4);
  d4s16 = vshr_n_s16(d1s16, 4);
  d2s16 = vshr_n_s16(d2s16, 4);
  d6s16 = vshr_n_s16(d3s16, 4);

  d26s16 = vget_low_s16(q13s16);
  d27s16 = vget_high_s16(q13s16);
  d28s16 = vget_low_s16(q14s16);
  d29s16 = vget_high_s16(q14s16);

  q9s32 = vmlal_s16(q9s32, d28s16, d16s16);
  q10s32 = vmlal_s16(q10s32, d28s16, d17s16);
  q11s32 = vmlal_s16(q11s32, d29s16, d16s16);
  q12s32 = vmlal_s16(q12s32, d29s16, d17s16);

  q9s32 = vmlal_s16(q9s32, d26s16, d17s16);
  q10s32 = vmlsl_s16(q10s32, d26s16, d16s16);
  q11s32 = vmlal_s16(q11s32, d27s16, d17s16);
  q12s32 = vmlsl_s16(q12s32, d27s16, d16s16);

  d1s16 = vshrn_n_s32(q9s32, 16);
  d3s16 = vshrn_n_s32(q10s32, 16);
  d5s16 = vshrn_n_s32(q11s32, 16);
  d7s16 = vshrn_n_s32(q12s32, 16);
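
  // As in the 4x4 transform, add 1 wherever the d1 term is nonzero: compare
  // q14 (d1 for both blocks) against zero, invert the mask so nonzero lanes
  // hold -1, and subtract it from the second output row of each block
  // (d1s16 for the left block, d5s16 for the right).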
  qEmptys16 = vdupq_n_s16(0);
  q14u16 = vceqq_s16(q14s16, qEmptys16);
  q14u16 = vmvnq_u16(q14u16);

  d28u16 = vget_low_u16(q14u16);
  d29u16 = vget_high_u16(q14u16);
  d1s16 = vsub_s16(d1s16, vreinterpret_s16_u16(d28u16));
  d5s16 = vsub_s16(d5s16, vreinterpret_s16_u16(d29u16));
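
  // q0/q1 hold the left block's 16 coefficients and q2/q3 the right block's,
  // so the stores lay the two transformed blocks out back to back.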
  q0s16 = vcombine_s16(d0s16, d1s16);
  q1s16 = vcombine_s16(d2s16, d3s16);
  q2s16 = vcombine_s16(d4s16, d5s16);
  q3s16 = vcombine_s16(d6s16, d7s16);

  vst1q_s16(output, q0s16);
  vst1q_s16(output + 8, q1s16);
  vst1q_s16(output + 16, q2s16);
  vst1q_s16(output + 24, q3s16);
  return;
}