dequant_idct_neon.c

/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <arm_neon.h>

#include "./vp8_rtcd.h"

static const int16_t cospi8sqrt2minus1 = 20091;
// 35468 exceeds INT16_MAX and gets converted to a negative number. Because of
// the way it is used in vqdmulh, where the result is doubled, it can be
// divided by 2 beforehand. This saves compensating for the negative value as
// well as shifting the result.
static const int16_t sinpi8sqrt2 = 35468 >> 1;
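// For reference: vqdmulh computes saturate((2 * a * b) >> 16), so multiplying
// by the halved constant gives (a * 35468) >> 16 directly. cospi8sqrt2minus1
// fits in int16_t and needs no such treatment; its vqdmulh result is instead
// halved afterwards with vshrq_n_s16 in the passes below.
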
void vp8_dequant_idct_add_neon(int16_t *input, int16_t *dq, unsigned char *dst,
                               int stride) {
  unsigned char *dst0;
  int32x2_t d14, d15;
  int16x4_t d2, d3, d4, d5, d10, d11, d12, d13;
  int16x8_t q1, q2, q3, q4, q5, q6;
  int16x8_t qEmpty = vdupq_n_s16(0);
  int32x2x2_t d2tmp0, d2tmp1;
  int16x4x2_t d2tmp2, d2tmp3;

  d14 = d15 = vdup_n_s32(0);

  // load input; the coefficient block is zeroed as it is consumed
  q3 = vld1q_s16(input);
  vst1q_s16(input, qEmpty);
  input += 8;
  q4 = vld1q_s16(input);
  vst1q_s16(input, qEmpty);

  // load dq
  q5 = vld1q_s16(dq);
  dq += 8;
  q6 = vld1q_s16(dq);

  // load src from dst: one 4-byte predictor row per 32-bit lane
  dst0 = dst;
  d14 = vld1_lane_s32((const int32_t *)dst0, d14, 0);
  dst0 += stride;
  d14 = vld1_lane_s32((const int32_t *)dst0, d14, 1);
  dst0 += stride;
  d15 = vld1_lane_s32((const int32_t *)dst0, d15, 0);
  dst0 += stride;
  d15 = vld1_lane_s32((const int32_t *)dst0, d15, 1);

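  // dequantize: multiply each coefficient by its dequant factor; only the low
  // 16 bits of each product are kept, and those are the same whether the
  // inputs are treated as signed or unsigned.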
  q1 = vreinterpretq_s16_u16(
      vmulq_u16(vreinterpretq_u16_s16(q3), vreinterpretq_u16_s16(q5)));
  q2 = vreinterpretq_s16_u16(
      vmulq_u16(vreinterpretq_u16_s16(q4), vreinterpretq_u16_s16(q6)));

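  // first IDCT pass, down the columns: d12/d13 are the even butterfly
  // (row0 +/- row2); q3/q4 build the odd part from rows 1 and 3 using the
  // two rotation constants.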
  d12 = vqadd_s16(vget_low_s16(q1), vget_low_s16(q2));
  d13 = vqsub_s16(vget_low_s16(q1), vget_low_s16(q2));

  q2 = vcombine_s16(vget_high_s16(q1), vget_high_s16(q2));

  q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);
  q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);

  q4 = vshrq_n_s16(q4, 1);
  q4 = vqaddq_s16(q4, q2);

  d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4));
  d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4));

  d2 = vqadd_s16(d12, d11);
  d3 = vqadd_s16(d13, d10);
  d4 = vqsub_s16(d13, d10);
  d5 = vqsub_s16(d12, d11);

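  // transpose the 4x4 result (a 32-bit trn followed by a 16-bit trn) so the
  // second pass can work along the other dimension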
  d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
  d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
  d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]),
                    vreinterpret_s16_s32(d2tmp1.val[0]));
  d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]),
                    vreinterpret_s16_s32(d2tmp1.val[1]));

  // loop 2: second IDCT pass, repeating the same butterflies on the
  // transposed block
  q2 = vcombine_s16(d2tmp2.val[1], d2tmp3.val[1]);

  q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);
  q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);

  d12 = vqadd_s16(d2tmp2.val[0], d2tmp3.val[0]);
  d13 = vqsub_s16(d2tmp2.val[0], d2tmp3.val[0]);

  q4 = vshrq_n_s16(q4, 1);
  q4 = vqaddq_s16(q4, q2);

  d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4));
  d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4));

  d2 = vqadd_s16(d12, d11);
  d3 = vqadd_s16(d13, d10);
  d4 = vqsub_s16(d13, d10);
  d5 = vqsub_s16(d12, d11);

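  // final scaling: a rounding shift, equivalent to (x + 4) >> 3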
  d2 = vrshr_n_s16(d2, 3);
  d3 = vrshr_n_s16(d3, 3);
  d4 = vrshr_n_s16(d4, 3);
  d5 = vrshr_n_s16(d5, 3);

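  // transpose back so the rows are in raster order for the store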
  d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
  d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
  d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]),
                    vreinterpret_s16_s32(d2tmp1.val[0]));
  d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]),
                    vreinterpret_s16_s32(d2tmp1.val[1]));

  q1 = vcombine_s16(d2tmp2.val[0], d2tmp2.val[1]);
  q2 = vcombine_s16(d2tmp3.val[0], d2tmp3.val[1]);

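  // reconstruct: widen the u8 predictor pixels and add the residual, then
  // narrow back to u8 with unsigned saturation (clamping to [0, 255])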
  q1 = vreinterpretq_s16_u16(
      vaddw_u8(vreinterpretq_u16_s16(q1), vreinterpret_u8_s32(d14)));
  q2 = vreinterpretq_s16_u16(
      vaddw_u8(vreinterpretq_u16_s16(q2), vreinterpret_u8_s32(d15)));

  d14 = vreinterpret_s32_u8(vqmovun_s16(q1));
  d15 = vreinterpret_s32_u8(vqmovun_s16(q2));

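  // store the four reconstructed rows, 4 bytes per row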
  dst0 = dst;
  vst1_lane_s32((int32_t *)dst0, d14, 0);
  dst0 += stride;
  vst1_lane_s32((int32_t *)dst0, d14, 1);
  dst0 += stride;
  vst1_lane_s32((int32_t *)dst0, d15, 0);
  dst0 += stride;
  vst1_lane_s32((int32_t *)dst0, d15, 1);
}