/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/mips/inv_txfm_msa.h"
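
/* MSA (MIPS SIMD Architecture) versions of the VP9 8x8 inverse DCT with
 * reconstruction add, specialized by coefficient count: the full
 * 64-coefficient transform, a reduced path for blocks with few nonzero
 * coefficients, and a DC-only path. */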

void vpx_idct8x8_64_add_msa(const int16_t *input, uint8_t *dst,
                            int32_t dst_stride) {
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;

  /* load vector elements of 8x8 block */
  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
  /* rows transform */
  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
                     in3, in4, in5, in6, in7);
  /* 1D idct8x8 */
  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                 in4, in5, in6, in7);
  /* columns transform */
  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
                     in3, in4, in5, in6, in7);
  /* 1D idct8x8 */
  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                 in4, in5, in6, in7);
  /* final rounding (add 2^4, divide by 2^5) and shift */
  SRARI_H4_SH(in0, in1, in2, in3, 5);
  SRARI_H4_SH(in4, in5, in6, in7, 5);
  /* add block and store 8x8 */
  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
  dst += (4 * dst_stride);
  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}
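
/* Reduced path used when the nonzero coefficients are confined to the
 * upper-left 4x4 corner of the block (the eob <= 12 case in the VP9
 * decoder): the row pass is computed on just the first four rows with the
 * idct8 butterfly stages written out explicitly, and only the column pass
 * runs the full 1D transform. */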
void vpx_idct8x8_12_add_msa(const int16_t *input, uint8_t *dst,
                            int32_t dst_stride) {
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
  v8i16 s0, s1, s2, s3, s4, s5, s6, s7, k0, k1, k2, k3, m0, m1, m2, m3;
  v4i32 tmp0, tmp1, tmp2, tmp3;
  v8i16 zero = { 0 };

  /* load vector elements of 8x8 block */
  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
  TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
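
  /* Stages 1-3 evaluate the odd half (cospi_28/cospi_4, cospi_12/cospi_20)
   * and even half (cospi_16, cospi_24/cospi_8) of the 1D idct8 on the four
   * nonzero rows: DOTP_SH4_SW multiplies by cosine pairs packed with
   * VP9_SET_COSPI_PAIR, and SRARI_*_SW applies the DCT_CONST_BITS rounding
   * shift; stage 4 merges the halves with a final butterfly. */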
  /* stage1 */
  ILVL_H2_SH(in3, in0, in2, in1, s0, s1);
  k0 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
  k1 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
  k2 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
  k3 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
  DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
  SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
  PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
  PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3);
  BUTTERFLY_4(s0, s1, s3, s2, s4, s7, s6, s5);

  /* stage2 */
  ILVR_H2_SH(in3, in1, in2, in0, s1, s0);
  k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
  k1 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
  k2 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
  k3 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
  DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
  SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
  PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
  PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3);
  BUTTERFLY_4(s0, s1, s2, s3, m0, m1, m2, m3);

  /* stage3 */
  s0 = __msa_ilvr_h(s6, s5);
  k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
  DOTP_SH2_SW(s0, s0, k1, k0, tmp0, tmp1);
  SRARI_W2_SW(tmp0, tmp1, DCT_CONST_BITS);
  PCKEV_H2_SH(zero, tmp0, zero, tmp1, s2, s3);

  /* stage4 */
  BUTTERFLY_8(m0, m1, m2, m3, s4, s2, s3, s7, in0, in1, in2, in3, in4, in5,
              in6, in7);
  TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
                     in3, in4, in5, in6, in7);
  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                 in4, in5, in6, in7);
  /* final rounding (add 2^4, divide by 2^5) and shift */
  SRARI_H4_SH(in0, in1, in2, in3, 5);
  SRARI_H4_SH(in4, in5, in6, in7, 5);
  /* add block and store 8x8 */
  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
  dst += (4 * dst_stride);
  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}
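
/* DC-only path: with a single nonzero (DC) coefficient, both 1D passes
 * reduce to scaling by cospi_16_64 with DCT_CONST_BITS rounding, so the
 * result is one constant that is rounded by 2^5, broadcast with
 * __msa_fill_h() and added to every pixel of the 8x8 block. */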
void vpx_idct8x8_1_add_msa(const int16_t *input, uint8_t *dst,
                           int32_t dst_stride) {
  int16_t out;
  int32_t val;
  v8i16 vec;

  out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);
  out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);
  val = ROUND_POWER_OF_TWO(out, 5);
  vec = __msa_fill_h(val);

  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
  dst += (4 * dst_stride);
  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
}
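
#if 0
/* Illustrative scalar sketch of the DC-only path above, compiled out of the
 * build. The helper names (clip_u8, idct8x8_1_add_ref) are hypothetical and
 * for exposition only; the MSA version replaces the loops below with
 * __msa_fill_h() and VP9_ADDBLK_ST8x4_UB(). */
static uint8_t clip_u8(int32_t v) { return (v < 0) ? 0 : (v > 255) ? 255 : v; }

static void idct8x8_1_add_ref(const int16_t *input, uint8_t *dst,
                              int32_t dst_stride) {
  int32_t r, c, val;
  /* Each 1D pass scales the DC term by cospi_16_64 and rounds by 2^14. */
  int16_t out = ROUND_POWER_OF_TWO(input[0] * cospi_16_64, DCT_CONST_BITS);
  out = ROUND_POWER_OF_TWO(out * cospi_16_64, DCT_CONST_BITS);
  /* final rounding: add 2^4, divide by 2^5 */
  val = ROUND_POWER_OF_TWO(out, 5);
  for (r = 0; r < 8; ++r) {
    for (c = 0; c < 8; ++c) {
      dst[r * dst_stride + c] = clip_u8(dst[r * dst_stride + c] + val);
    }
  }
}
#endif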