/*
 * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/ppc/types_vsx.h"
#include "vpx_dsp/ppc/transpose_vsx.h"
#include "vpx_dsp/ppc/bitdepth_conversion_vsx.h"
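
/* The 2-D Hadamard transforms below are built from a single 1-D kernel:
 * one pass over the rows, a transpose, then a second pass over what are
 * now the columns. The 16x16 variant applies the 8x8 transform to each
 * quadrant and merges the four results with one extra butterfly stage. */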

static void vpx_hadamard_s16_8x8_one_pass(int16x8_t v[8]) {
  /* Stage 1: butterflies on adjacent row pairs. */
  const int16x8_t b0 = vec_add(v[0], v[1]);
  const int16x8_t b1 = vec_sub(v[0], v[1]);
  const int16x8_t b2 = vec_add(v[2], v[3]);
  const int16x8_t b3 = vec_sub(v[2], v[3]);
  const int16x8_t b4 = vec_add(v[4], v[5]);
  const int16x8_t b5 = vec_sub(v[4], v[5]);
  const int16x8_t b6 = vec_add(v[6], v[7]);
  const int16x8_t b7 = vec_sub(v[6], v[7]);

  /* Stage 2: butterflies across pairs of pairs. */
  const int16x8_t c0 = vec_add(b0, b2);
  const int16x8_t c1 = vec_add(b1, b3);
  const int16x8_t c2 = vec_sub(b0, b2);
  const int16x8_t c3 = vec_sub(b1, b3);
  const int16x8_t c4 = vec_add(b4, b6);
  const int16x8_t c5 = vec_add(b5, b7);
  const int16x8_t c6 = vec_sub(b4, b6);
  const int16x8_t c7 = vec_sub(b5, b7);

  /* Stage 3: combine the two halves, in the output order used by the
   * scalar Hadamard reference in vpx_dsp. */
  v[0] = vec_add(c0, c4);
  v[1] = vec_sub(c2, c6);
  v[2] = vec_sub(c0, c4);
  v[3] = vec_add(c2, c6);
  v[4] = vec_add(c3, c7);
  v[5] = vec_sub(c3, c7);
  v[6] = vec_sub(c1, c5);
  v[7] = vec_add(c1, c5);
}
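
/* For reference, each of the eight vector lanes above computes the same
 * 8-point butterfly network as this scalar sketch (illustrative only; the
 * helper name is hypothetical and not part of the library). Applying it to
 * the rows, transposing, and applying it again yields the 2-D transform. */
#if 0
static void hadamard8_one_pass_scalar(int16_t a[8]) {
  const int16_t b0 = a[0] + a[1], b1 = a[0] - a[1];
  const int16_t b2 = a[2] + a[3], b3 = a[2] - a[3];
  const int16_t b4 = a[4] + a[5], b5 = a[4] - a[5];
  const int16_t b6 = a[6] + a[7], b7 = a[6] - a[7];
  const int16_t c0 = b0 + b2, c1 = b1 + b3, c2 = b0 - b2, c3 = b1 - b3;
  const int16_t c4 = b4 + b6, c5 = b5 + b7, c6 = b4 - b6, c7 = b5 - b7;
  a[0] = c0 + c4;
  a[1] = c2 - c6;
  a[2] = c0 - c4;
  a[3] = c2 + c6;
  a[4] = c3 + c7;
  a[5] = c3 - c7;
  a[6] = c1 - c5;
  a[7] = c1 + c5;
}
#endif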

void vpx_hadamard_8x8_vsx(const int16_t *src_diff, ptrdiff_t src_stride,
                          tran_low_t *coeff) {
  int16x8_t v[8];

  /* Load the eight rows of the 8x8 difference block. */
  v[0] = vec_vsx_ld(0, src_diff);
  v[1] = vec_vsx_ld(0, src_diff + src_stride);
  v[2] = vec_vsx_ld(0, src_diff + (2 * src_stride));
  v[3] = vec_vsx_ld(0, src_diff + (3 * src_stride));
  v[4] = vec_vsx_ld(0, src_diff + (4 * src_stride));
  v[5] = vec_vsx_ld(0, src_diff + (5 * src_stride));
  v[6] = vec_vsx_ld(0, src_diff + (6 * src_stride));
  v[7] = vec_vsx_ld(0, src_diff + (7 * src_stride));

  /* 1-D Hadamard on the rows, transpose, then 1-D Hadamard on what are
   * now the columns. */
  vpx_hadamard_s16_8x8_one_pass(v);
  vpx_transpose_s16_8x8(v);
  vpx_hadamard_s16_8x8_one_pass(v);

  /* Store the 64 coefficients contiguously, eight per row. */
  store_tran_low(v[0], 0, coeff);
  store_tran_low(v[1], 0, coeff + 8);
  store_tran_low(v[2], 0, coeff + 16);
  store_tran_low(v[3], 0, coeff + 24);
  store_tran_low(v[4], 0, coeff + 32);
  store_tran_low(v[5], 0, coeff + 40);
  store_tran_low(v[6], 0, coeff + 48);
  store_tran_low(v[7], 0, coeff + 56);
}
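
/* A minimal usage sketch (hypothetical caller, illustrative only):
 * transform one contiguous 8x8 block of pixel differences. */
#if 0
static void example_hadamard_8x8(const int16_t diff[8 * 8]) {
  tran_low_t coeff[64];
  /* The stride equals the block width because the rows are contiguous. */
  vpx_hadamard_8x8_vsx(diff, 8, coeff);
  /* coeff[0] now holds the (unnormalized) DC term: the sum of all 64
   * input samples. */
}
#endif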

void vpx_hadamard_16x16_vsx(const int16_t *src_diff, ptrdiff_t src_stride,
                            tran_low_t *coeff) {
  int i;
  const uint16x8_t ones = vec_splat_u16(1);

  /* Rearrange 16x16 to 8x32 and remove stride.
   * Top left first. */
  vpx_hadamard_8x8_vsx(src_diff, src_stride, coeff);
  /* Top right. */
  vpx_hadamard_8x8_vsx(src_diff + 8 + 0 * src_stride, src_stride, coeff + 64);
  /* Bottom left. */
  vpx_hadamard_8x8_vsx(src_diff + 0 + 8 * src_stride, src_stride, coeff + 128);
  /* Bottom right. */
  vpx_hadamard_8x8_vsx(src_diff + 8 + 8 * src_stride, src_stride, coeff + 192);

  /* Overlay the 8x8 blocks and combine. */
  for (i = 0; i < 64; i += 8) {
    const int16x8_t a0 = load_tran_low(0, coeff);
    const int16x8_t a1 = load_tran_low(0, coeff + 64);
    const int16x8_t a2 = load_tran_low(0, coeff + 128);
    const int16x8_t a3 = load_tran_low(0, coeff + 192);

    /* Halve first to prevent the result from escaping int16_t (see the
     * range note after this function). */
    const int16x8_t b0 = vec_sra(a0, ones);
    const int16x8_t b1 = vec_sra(a1, ones);
    const int16x8_t b2 = vec_sra(a2, ones);
    const int16x8_t b3 = vec_sra(a3, ones);

    /* One more butterfly stage across the four sub-blocks. */
    const int16x8_t c0 = vec_add(b0, b1);
    const int16x8_t c2 = vec_add(b2, b3);
    const int16x8_t c1 = vec_sub(b0, b1);
    const int16x8_t c3 = vec_sub(b2, b3);

    const int16x8_t d0 = vec_add(c0, c2);
    const int16x8_t d1 = vec_add(c1, c3);
    const int16x8_t d2 = vec_sub(c0, c2);
    const int16x8_t d3 = vec_sub(c1, c3);

    store_tran_low(d0, 0, coeff);
    store_tran_low(d1, 0, coeff + 64);
    store_tran_low(d2, 0, coeff + 128);
    store_tran_low(d3, 0, coeff + 192);

    coeff += 8;
  }
}
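
/* Range note for the vec_sra step above (worked arithmetic, assuming 8-bit
 * content so pixel differences lie in [-255, 255]): an 8x8 Hadamard
 * coefficient is bounded by 64 * 255 = 16320, which fits in int16_t.
 * Summing four such values directly could reach 4 * 16320 = 65280 and
 * overflow, but halving each input first bounds the combined result by
 * 4 * 8160 = 32640 <= 32767. */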