idct_blk_neon.c

/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vp8_rtcd.h"
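
/* DC-only path: when each of the two 4x4 blocks has at most its DC
 * coefficient set, the inverse transform reduces to a constant. Dequantize
 * the two DCs (q[0] and q[16]), round with (x + 4) >> 3, and add the
 * resulting constants to the two adjacent 4x4 predictor blocks at dst and
 * dst + 4.
 */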
static void idct_dequant_0_2x_neon(int16_t *q, int16_t dq, unsigned char *dst,
                                   int stride) {
  unsigned char *dst0;
  int i, a0, a1;
  int16x8x2_t q2Add;
  int32x2_t d2s32 = vdup_n_s32(0), d4s32 = vdup_n_s32(0);
  uint8x8_t d2u8, d4u8;
  uint16x8_t q1u16, q2u16;

  a0 = ((q[0] * dq) + 4) >> 3;
  a1 = ((q[16] * dq) + 4) >> 3;
  q[0] = q[16] = 0;
  q2Add.val[0] = vdupq_n_s16((int16_t)a0);
  q2Add.val[1] = vdupq_n_s16((int16_t)a1);
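
  // Each iteration handles one 4x4 block: load its four 4-byte predictor
  // rows, widen and add the block's DC constant, saturate back to u8, and
  // store the rows back.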
  for (i = 0; i < 2; i++, dst += 4) {
    dst0 = dst;
    d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 0);
    dst0 += stride;
    d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 1);
    dst0 += stride;
    d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 0);
    dst0 += stride;
    d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 1);

    q1u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]),
                     vreinterpret_u8_s32(d2s32));
    q2u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]),
                     vreinterpret_u8_s32(d4s32));

    d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));
    d4u8 = vqmovun_s16(vreinterpretq_s16_u16(q2u16));

    d2s32 = vreinterpret_s32_u8(d2u8);
    d4s32 = vreinterpret_s32_u8(d4u8);

    dst0 = dst;
    vst1_lane_s32((int32_t *)dst0, d2s32, 0);
    dst0 += stride;
    vst1_lane_s32((int32_t *)dst0, d2s32, 1);
    dst0 += stride;
    vst1_lane_s32((int32_t *)dst0, d4s32, 0);
    dst0 += stride;
    vst1_lane_s32((int32_t *)dst0, d4s32, 1);
  }
}
static const int16_t cospi8sqrt2minus1 = 20091;
// sin(pi/8) * sqrt(2) is 35468 (0x8a8c) in Q16; because its lowest bit is 0 it
// can be pre-shifted right by one to fit in an int16_t, and the doubling done
// by vqdmulh restores the lost factor of two.
static const int16_t sinpi8sqrt2 = 17734;
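
/* Full path: dequantize two adjacent 4x4 blocks (coefficients at q and
 * q + 16) and run the two-pass VP8 inverse DCT, adding the result to the
 * predictors at dst and dst + 4. After the initial swap, each 128-bit
 * register holds the same coefficient row of both blocks, so the two blocks
 * are transformed in parallel.
 */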
static void idct_dequant_full_2x_neon(int16_t *q, int16_t *dq,
                                      unsigned char *dst, int stride) {
  unsigned char *dst0, *dst1;
  int32x2_t d28, d29, d30, d31;
  int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11;
  int16x8_t qEmpty = vdupq_n_s16(0);
  int32x4x2_t q2tmp0, q2tmp1;
  int16x8x2_t q2tmp2, q2tmp3;
  int16x4_t dLow0, dLow1, dHigh0, dHigh1;

  d28 = d29 = d30 = d31 = vdup_n_s32(0);

  // load dq
  q0 = vld1q_s16(dq);
  dq += 8;
  q1 = vld1q_s16(dq);

  // load q
  q2 = vld1q_s16(q);
  vst1q_s16(q, qEmpty);
  q += 8;
  q3 = vld1q_s16(q);
  vst1q_s16(q, qEmpty);
  q += 8;
  q4 = vld1q_s16(q);
  vst1q_s16(q, qEmpty);
  q += 8;
  q5 = vld1q_s16(q);
  vst1q_s16(q, qEmpty);

  // load src from dst
  dst0 = dst;
  dst1 = dst + 4;
  d28 = vld1_lane_s32((const int32_t *)dst0, d28, 0);
  dst0 += stride;
  d28 = vld1_lane_s32((const int32_t *)dst1, d28, 1);
  dst1 += stride;
  d29 = vld1_lane_s32((const int32_t *)dst0, d29, 0);
  dst0 += stride;
  d29 = vld1_lane_s32((const int32_t *)dst1, d29, 1);
  dst1 += stride;
  d30 = vld1_lane_s32((const int32_t *)dst0, d30, 0);
  dst0 += stride;
  d30 = vld1_lane_s32((const int32_t *)dst1, d30, 1);
  dst1 += stride;
  d31 = vld1_lane_s32((const int32_t *)dst0, d31, 0);
  d31 = vld1_lane_s32((const int32_t *)dst1, d31, 1);
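
  // dequantize: element-wise multiply the coefficients by the dequant factors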
  q2 = vmulq_s16(q2, q0);
  q3 = vmulq_s16(q3, q1);
  q4 = vmulq_s16(q4, q0);
  q5 = vmulq_s16(q5, q1);

  // vswp
  dLow0 = vget_low_s16(q2);
  dHigh0 = vget_high_s16(q2);
  dLow1 = vget_low_s16(q4);
  dHigh1 = vget_high_s16(q4);
  q2 = vcombine_s16(dLow0, dLow1);
  q4 = vcombine_s16(dHigh0, dHigh1);

  dLow0 = vget_low_s16(q3);
  dHigh0 = vget_high_s16(q3);
  dLow1 = vget_low_s16(q5);
  dHigh1 = vget_high_s16(q5);
  q3 = vcombine_s16(dLow0, dLow1);
  q5 = vcombine_s16(dHigh0, dHigh1);
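
  // first 1-D IDCT pass: butterflies across the four coefficient rows of both
  // blocks (a column transform)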
  q6 = vqdmulhq_n_s16(q4, sinpi8sqrt2);
  q7 = vqdmulhq_n_s16(q5, sinpi8sqrt2);
  q8 = vqdmulhq_n_s16(q4, cospi8sqrt2minus1);
  q9 = vqdmulhq_n_s16(q5, cospi8sqrt2minus1);

  q10 = vqaddq_s16(q2, q3);
  q11 = vqsubq_s16(q2, q3);

  q8 = vshrq_n_s16(q8, 1);
  q9 = vshrq_n_s16(q9, 1);

  q4 = vqaddq_s16(q4, q8);
  q5 = vqaddq_s16(q5, q9);

  q2 = vqsubq_s16(q6, q5);
  q3 = vqaddq_s16(q7, q4);

  q4 = vqaddq_s16(q10, q3);
  q5 = vqaddq_s16(q11, q2);
  q6 = vqsubq_s16(q11, q2);
  q7 = vqsubq_s16(q10, q3);
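
  // transpose the intermediate 4x4 results so the second pass can work along
  // the other dimension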
  q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6));
  q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
  q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
                     vreinterpretq_s16_s32(q2tmp1.val[0]));
  q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
                     vreinterpretq_s16_s32(q2tmp1.val[1]));

  // loop 2: second 1-D IDCT pass, applied to the transposed data
  q8 = vqdmulhq_n_s16(q2tmp2.val[1], sinpi8sqrt2);
  q9 = vqdmulhq_n_s16(q2tmp3.val[1], sinpi8sqrt2);
  q10 = vqdmulhq_n_s16(q2tmp2.val[1], cospi8sqrt2minus1);
  q11 = vqdmulhq_n_s16(q2tmp3.val[1], cospi8sqrt2minus1);

  q2 = vqaddq_s16(q2tmp2.val[0], q2tmp3.val[0]);
  q3 = vqsubq_s16(q2tmp2.val[0], q2tmp3.val[0]);

  q10 = vshrq_n_s16(q10, 1);
  q11 = vshrq_n_s16(q11, 1);

  q10 = vqaddq_s16(q2tmp2.val[1], q10);
  q11 = vqaddq_s16(q2tmp3.val[1], q11);

  q8 = vqsubq_s16(q8, q11);
  q9 = vqaddq_s16(q9, q10);

  q4 = vqaddq_s16(q2, q9);
  q5 = vqaddq_s16(q3, q8);
  q6 = vqsubq_s16(q3, q8);
  q7 = vqsubq_s16(q2, q9);
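
  // final rounding shift: residual = (x + 4) >> 3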
  q4 = vrshrq_n_s16(q4, 3);
  q5 = vrshrq_n_s16(q5, 3);
  q6 = vrshrq_n_s16(q6, 3);
  q7 = vrshrq_n_s16(q7, 3);
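
  // transpose back so each register again holds one pixel row of both blocks,
  // matching the predictor rows loaded into d28..d31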
  q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6));
  q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
  q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
                     vreinterpretq_s16_s32(q2tmp1.val[0]));
  q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
                     vreinterpretq_s16_s32(q2tmp1.val[1]));
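
  // widen the predictor rows, add the residual, and saturate back to u8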
  q4 = vreinterpretq_s16_u16(
      vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[0]), vreinterpret_u8_s32(d28)));
  q5 = vreinterpretq_s16_u16(
      vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[1]), vreinterpret_u8_s32(d29)));
  q6 = vreinterpretq_s16_u16(
      vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[0]), vreinterpret_u8_s32(d30)));
  q7 = vreinterpretq_s16_u16(
      vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[1]), vreinterpret_u8_s32(d31)));

  d28 = vreinterpret_s32_u8(vqmovun_s16(q4));
  d29 = vreinterpret_s32_u8(vqmovun_s16(q5));
  d30 = vreinterpret_s32_u8(vqmovun_s16(q6));
  d31 = vreinterpret_s32_u8(vqmovun_s16(q7));

  dst0 = dst;
  dst1 = dst + 4;
  vst1_lane_s32((int32_t *)dst0, d28, 0);
  dst0 += stride;
  vst1_lane_s32((int32_t *)dst1, d28, 1);
  dst1 += stride;
  vst1_lane_s32((int32_t *)dst0, d29, 0);
  dst0 += stride;
  vst1_lane_s32((int32_t *)dst1, d29, 1);
  dst1 += stride;
  vst1_lane_s32((int32_t *)dst0, d30, 0);
  dst0 += stride;
  vst1_lane_s32((int32_t *)dst1, d30, 1);
  dst1 += stride;
  vst1_lane_s32((int32_t *)dst0, d31, 0);
  vst1_lane_s32((int32_t *)dst1, d31, 1);
}
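
/* eobs holds one end-of-block count per 4x4 block, so a 16-bit read covers a
 * horizontally adjacent pair. An eob of 0 or 1 means at most the DC
 * coefficient is present; masking the pair with 0xfefe tells whether either
 * block has AC coefficients and needs the full IDCT, otherwise the cheaper
 * DC-only path suffices.
 */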
void vp8_dequant_idct_add_y_block_neon(short *q, short *dq, unsigned char *dst,
                                       int stride, char *eobs) {
  int i;

  for (i = 0; i < 4; ++i) {
    if (((short *)(eobs))[0]) {
      if (((short *)eobs)[0] & 0xfefe)
        idct_dequant_full_2x_neon(q, dq, dst, stride);
      else
        idct_dequant_0_2x_neon(q, dq[0], dst, stride);
    }

    if (((short *)(eobs))[1]) {
      if (((short *)eobs)[1] & 0xfefe)
        idct_dequant_full_2x_neon(q + 32, dq, dst + 8, stride);
      else
        idct_dequant_0_2x_neon(q + 32, dq[0], dst + 8, stride);
    }

    q += 64;
    dst += 4 * stride;
    eobs += 4;
  }
}
void vp8_dequant_idct_add_uv_block_neon(short *q, short *dq,
                                        unsigned char *dst_u,
                                        unsigned char *dst_v, int stride,
                                        char *eobs) {
  if (((short *)(eobs))[0]) {
    if (((short *)eobs)[0] & 0xfefe)
      idct_dequant_full_2x_neon(q, dq, dst_u, stride);
    else
      idct_dequant_0_2x_neon(q, dq[0], dst_u, stride);
  }

  q += 32;
  dst_u += 4 * stride;

  if (((short *)(eobs))[1]) {
    if (((short *)eobs)[1] & 0xfefe)
      idct_dequant_full_2x_neon(q, dq, dst_u, stride);
    else
      idct_dequant_0_2x_neon(q, dq[0], dst_u, stride);
  }

  q += 32;

  if (((short *)(eobs))[2]) {
    if (((short *)eobs)[2] & 0xfefe)
      idct_dequant_full_2x_neon(q, dq, dst_v, stride);
    else
      idct_dequant_0_2x_neon(q, dq[0], dst_v, stride);
  }

  q += 32;
  dst_v += 4 * stride;

  if (((short *)(eobs))[3]) {
    if (((short *)eobs)[3] & 0xfefe)
      idct_dequant_full_2x_neon(q, dq, dst_v, stride);
    else
      idct_dequant_0_2x_neon(q, dq[0], dst_v, stride);
  }
}